1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/mman.h> 42 #include <sys/types.h> 43 #include <errno.h> 44 45 #include <sys/queue.h> 46 #include <sys/stat.h> 47 48 #include <stdint.h> 49 #include <unistd.h> 50 #include <inttypes.h> 51 52 #include <rte_common.h> 53 #include <rte_errno.h> 54 #include <rte_byteorder.h> 55 #include <rte_log.h> 56 #include <rte_debug.h> 57 #include <rte_cycles.h> 58 #include <rte_memory.h> 59 #include <rte_memcpy.h> 60 #include <rte_memzone.h> 61 #include <rte_launch.h> 62 #include <rte_eal.h> 63 #include <rte_alarm.h> 64 #include <rte_per_lcore.h> 65 #include <rte_lcore.h> 66 #include <rte_atomic.h> 67 #include <rte_branch_prediction.h> 68 #include <rte_mempool.h> 69 #include <rte_malloc.h> 70 #include <rte_mbuf.h> 71 #include <rte_interrupts.h> 72 #include <rte_pci.h> 73 #include <rte_ether.h> 74 #include <rte_ethdev.h> 75 #include <rte_dev.h> 76 #include <rte_string_fns.h> 77 #ifdef RTE_LIBRTE_IXGBE_PMD 78 #include <rte_pmd_ixgbe.h> 79 #endif 80 #ifdef RTE_LIBRTE_PDUMP 81 #include <rte_pdump.h> 82 #endif 83 #include <rte_flow.h> 84 #include <rte_metrics.h> 85 #ifdef RTE_LIBRTE_BITRATE 86 #include <rte_bitrate.h> 87 #endif 88 #ifdef RTE_LIBRTE_LATENCY_STATS 89 #include <rte_latencystats.h> 90 #endif 91 92 #include "testpmd.h" 93 94 uint16_t verbose_level = 0; /**< Silent by default. */ 95 96 /* use master core for command line ? */ 97 uint8_t interactive = 0; 98 uint8_t auto_start = 0; 99 uint8_t tx_first; 100 char cmdline_filename[PATH_MAX] = {0}; 101 102 /* 103 * NUMA support configuration. 104 * When set, the NUMA support attempts to dispatch the allocation of the 105 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 106 * probed ports among the CPU sockets 0 and 1. 107 * Otherwise, all memory is allocated from CPU socket 0. 108 */ 109 uint8_t numa_support = 1; /**< numa enabled by default */ 110 111 /* 112 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 113 * not configured. 114 */ 115 uint8_t socket_num = UMA_NO_CONFIG; 116 117 /* 118 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 119 */ 120 uint8_t mp_anon = 0; 121 122 /* 123 * Record the Ethernet address of peer target ports to which packets are 124 * forwarded. 125 * Must be instantiated with the ethernet addresses of peer traffic generator 126 * ports. 127 */ 128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 129 portid_t nb_peer_eth_addrs = 0; 130 131 /* 132 * Probed Target Environment. 133 */ 134 struct rte_port *ports; /**< For all probed ethernet ports. */ 135 portid_t nb_ports; /**< Number of probed ethernet ports. */ 136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 137 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 138 139 /* 140 * Test Forwarding Configuration. 141 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 142 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 143 */ 144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 146 portid_t nb_cfg_ports; /**< Number of configured ports. */ 147 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 148 149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 151 152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 153 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 154 155 /* 156 * Forwarding engines. 157 */ 158 struct fwd_engine * fwd_engines[] = { 159 &io_fwd_engine, 160 &mac_fwd_engine, 161 &mac_swap_engine, 162 &flow_gen_engine, 163 &rx_only_engine, 164 &tx_only_engine, 165 &csum_fwd_engine, 166 &icmp_echo_engine, 167 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 168 &softnic_tm_engine, 169 &softnic_tm_bypass_engine, 170 #endif 171 #ifdef RTE_LIBRTE_IEEE1588 172 &ieee1588_fwd_engine, 173 #endif 174 NULL, 175 }; 176 177 struct fwd_config cur_fwd_config; 178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 179 uint32_t retry_enabled; 180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 182 183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 184 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 185 * specified on command-line. */ 186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 187 188 /* 189 * In container, it cannot terminate the process which running with 'stats-period' 190 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 191 */ 192 uint8_t f_quit; 193 194 /* 195 * Configuration of packet segments used by the "txonly" processing engine. 196 */ 197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 198 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 199 TXONLY_DEF_PACKET_LEN, 200 }; 201 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 202 203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 204 /**< Split policy for packets to TX. */ 205 206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 208 209 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 210 uint8_t dcb_config = 0; 211 212 /* Whether the dcb is in testing status */ 213 uint8_t dcb_test = 0; 214 215 /* 216 * Configurable number of RX/TX queues. 217 */ 218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 220 221 /* 222 * Configurable number of RX/TX ring descriptors. 223 */ 224 #define RTE_TEST_RX_DESC_DEFAULT 128 225 #define RTE_TEST_TX_DESC_DEFAULT 512 226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 228 229 #define RTE_PMD_PARAM_UNSET -1 230 /* 231 * Configurable values of RX and TX ring threshold registers. 232 */ 233 234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 237 238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 241 242 /* 243 * Configurable value of RX free threshold. 244 */ 245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 246 247 /* 248 * Configurable value of RX drop enable. 249 */ 250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 251 252 /* 253 * Configurable value of TX free threshold. 254 */ 255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 256 257 /* 258 * Configurable value of TX RS bit threshold. 259 */ 260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 261 262 /* 263 * Configurable value of TX queue flags. 264 */ 265 int32_t txq_flags = RTE_PMD_PARAM_UNSET; 266 267 /* 268 * Receive Side Scaling (RSS) configuration. 269 */ 270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 271 272 /* 273 * Port topology configuration 274 */ 275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 276 277 /* 278 * Avoids to flush all the RX streams before starts forwarding. 279 */ 280 uint8_t no_flush_rx = 0; /* flush by default */ 281 282 /* 283 * Flow API isolated mode. 284 */ 285 uint8_t flow_isolate_all; 286 287 /* 288 * Avoids to check link status when starting/stopping a port. 289 */ 290 uint8_t no_link_check = 0; /* check by default */ 291 292 /* 293 * Enable link status change notification 294 */ 295 uint8_t lsc_interrupt = 1; /* enabled by default */ 296 297 /* 298 * Enable device removal notification. 299 */ 300 uint8_t rmv_interrupt = 1; /* enabled by default */ 301 302 /* 303 * Display or mask ether events 304 * Default to all events except VF_MBOX 305 */ 306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 307 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 308 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 309 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 310 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 311 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 312 313 /* 314 * NIC bypass mode configuration options. 315 */ 316 317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 318 /* The NIC bypass watchdog timeout. */ 319 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 320 #endif 321 322 323 #ifdef RTE_LIBRTE_LATENCY_STATS 324 325 /* 326 * Set when latency stats is enabled in the commandline 327 */ 328 uint8_t latencystats_enabled; 329 330 /* 331 * Lcore ID to serive latency statistics. 332 */ 333 lcoreid_t latencystats_lcore_id = -1; 334 335 #endif 336 337 /* 338 * Ethernet device configuration. 339 */ 340 struct rte_eth_rxmode rx_mode = { 341 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 342 .split_hdr_size = 0, 343 .header_split = 0, /**< Header Split disabled. */ 344 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 345 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 346 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 347 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 348 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 349 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */ 350 .hw_timestamp = 0, /**< HW timestamp enabled. */ 351 }; 352 353 struct rte_fdir_conf fdir_conf = { 354 .mode = RTE_FDIR_MODE_NONE, 355 .pballoc = RTE_FDIR_PBALLOC_64K, 356 .status = RTE_FDIR_REPORT_STATUS, 357 .mask = { 358 .vlan_tci_mask = 0x0, 359 .ipv4_mask = { 360 .src_ip = 0xFFFFFFFF, 361 .dst_ip = 0xFFFFFFFF, 362 }, 363 .ipv6_mask = { 364 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 365 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 366 }, 367 .src_port_mask = 0xFFFF, 368 .dst_port_mask = 0xFFFF, 369 .mac_addr_byte_mask = 0xFF, 370 .tunnel_type_mask = 1, 371 .tunnel_id_mask = 0xFFFFFFFF, 372 }, 373 .drop_queue = 127, 374 }; 375 376 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 377 378 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 379 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 380 381 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 382 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 383 384 uint16_t nb_tx_queue_stats_mappings = 0; 385 uint16_t nb_rx_queue_stats_mappings = 0; 386 387 unsigned int num_sockets = 0; 388 unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 389 390 #ifdef RTE_LIBRTE_BITRATE 391 /* Bitrate statistics */ 392 struct rte_stats_bitrates *bitrate_data; 393 lcoreid_t bitrate_lcore_id; 394 uint8_t bitrate_enabled; 395 #endif 396 397 struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 398 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 399 400 /* Forward function declarations */ 401 static void map_port_queue_stats_mapping_registers(portid_t pi, 402 struct rte_port *port); 403 static void check_all_ports_link_status(uint32_t port_mask); 404 static int eth_event_callback(portid_t port_id, 405 enum rte_eth_event_type type, 406 void *param, void *ret_param); 407 408 /* 409 * Check if all the ports are started. 410 * If yes, return positive value. If not, return zero. 411 */ 412 static int all_ports_started(void); 413 414 struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 415 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 416 417 /* 418 * Helper function to check if socket is already discovered. 419 * If yes, return positive value. If not, return zero. 420 */ 421 int 422 new_socket_id(unsigned int socket_id) 423 { 424 unsigned int i; 425 426 for (i = 0; i < num_sockets; i++) { 427 if (socket_ids[i] == socket_id) 428 return 0; 429 } 430 return 1; 431 } 432 433 /* 434 * Setup default configuration. 435 */ 436 static void 437 set_default_fwd_lcores_config(void) 438 { 439 unsigned int i; 440 unsigned int nb_lc; 441 unsigned int sock_num; 442 443 nb_lc = 0; 444 for (i = 0; i < RTE_MAX_LCORE; i++) { 445 sock_num = rte_lcore_to_socket_id(i); 446 if (new_socket_id(sock_num)) { 447 if (num_sockets >= RTE_MAX_NUMA_NODES) { 448 rte_exit(EXIT_FAILURE, 449 "Total sockets greater than %u\n", 450 RTE_MAX_NUMA_NODES); 451 } 452 socket_ids[num_sockets++] = sock_num; 453 } 454 if (!rte_lcore_is_enabled(i)) 455 continue; 456 if (i == rte_get_master_lcore()) 457 continue; 458 fwd_lcores_cpuids[nb_lc++] = i; 459 } 460 nb_lcores = (lcoreid_t) nb_lc; 461 nb_cfg_lcores = nb_lcores; 462 nb_fwd_lcores = 1; 463 } 464 465 static void 466 set_def_peer_eth_addrs(void) 467 { 468 portid_t i; 469 470 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 471 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 472 peer_eth_addrs[i].addr_bytes[5] = i; 473 } 474 } 475 476 static void 477 set_default_fwd_ports_config(void) 478 { 479 portid_t pt_id; 480 int i = 0; 481 482 RTE_ETH_FOREACH_DEV(pt_id) 483 fwd_ports_ids[i++] = pt_id; 484 485 nb_cfg_ports = nb_ports; 486 nb_fwd_ports = nb_ports; 487 } 488 489 void 490 set_def_fwd_config(void) 491 { 492 set_default_fwd_lcores_config(); 493 set_def_peer_eth_addrs(); 494 set_default_fwd_ports_config(); 495 } 496 497 /* 498 * Configuration initialisation done once at init time. 499 */ 500 static void 501 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 502 unsigned int socket_id) 503 { 504 char pool_name[RTE_MEMPOOL_NAMESIZE]; 505 struct rte_mempool *rte_mp = NULL; 506 uint32_t mb_size; 507 508 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 509 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 510 511 RTE_LOG(INFO, USER1, 512 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 513 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 514 515 if (mp_anon != 0) { 516 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 517 mb_size, (unsigned) mb_mempool_cache, 518 sizeof(struct rte_pktmbuf_pool_private), 519 socket_id, 0); 520 if (rte_mp == NULL) 521 goto err; 522 523 if (rte_mempool_populate_anon(rte_mp) == 0) { 524 rte_mempool_free(rte_mp); 525 rte_mp = NULL; 526 goto err; 527 } 528 rte_pktmbuf_pool_init(rte_mp, NULL); 529 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 530 } else { 531 /* wrapper to rte_mempool_create() */ 532 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 533 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 534 } 535 536 err: 537 if (rte_mp == NULL) { 538 rte_exit(EXIT_FAILURE, 539 "Creation of mbuf pool for socket %u failed: %s\n", 540 socket_id, rte_strerror(rte_errno)); 541 } else if (verbose_level > 0) { 542 rte_mempool_dump(stdout, rte_mp); 543 } 544 } 545 546 /* 547 * Check given socket id is valid or not with NUMA mode, 548 * if valid, return 0, else return -1 549 */ 550 static int 551 check_socket_id(const unsigned int socket_id) 552 { 553 static int warning_once = 0; 554 555 if (new_socket_id(socket_id)) { 556 if (!warning_once && numa_support) 557 printf("Warning: NUMA should be configured manually by" 558 " using --port-numa-config and" 559 " --ring-numa-config parameters along with" 560 " --numa.\n"); 561 warning_once = 1; 562 return -1; 563 } 564 return 0; 565 } 566 567 static void 568 init_config(void) 569 { 570 portid_t pid; 571 struct rte_port *port; 572 struct rte_mempool *mbp; 573 unsigned int nb_mbuf_per_pool; 574 lcoreid_t lc_id; 575 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 576 struct rte_gro_param gro_param; 577 uint32_t gso_types; 578 579 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 580 581 if (numa_support) { 582 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 583 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 584 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 585 } 586 587 /* Configuration of logical cores. */ 588 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 589 sizeof(struct fwd_lcore *) * nb_lcores, 590 RTE_CACHE_LINE_SIZE); 591 if (fwd_lcores == NULL) { 592 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 593 "failed\n", nb_lcores); 594 } 595 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 596 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 597 sizeof(struct fwd_lcore), 598 RTE_CACHE_LINE_SIZE); 599 if (fwd_lcores[lc_id] == NULL) { 600 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 601 "failed\n"); 602 } 603 fwd_lcores[lc_id]->cpuid_idx = lc_id; 604 } 605 606 RTE_ETH_FOREACH_DEV(pid) { 607 port = &ports[pid]; 608 rte_eth_dev_info_get(pid, &port->dev_info); 609 610 if (numa_support) { 611 if (port_numa[pid] != NUMA_NO_CONFIG) 612 port_per_socket[port_numa[pid]]++; 613 else { 614 uint32_t socket_id = rte_eth_dev_socket_id(pid); 615 616 /* if socket_id is invalid, set to 0 */ 617 if (check_socket_id(socket_id) < 0) 618 socket_id = 0; 619 port_per_socket[socket_id]++; 620 } 621 } 622 623 /* set flag to initialize port/queue */ 624 port->need_reconfig = 1; 625 port->need_reconfig_queues = 1; 626 } 627 628 /* 629 * Create pools of mbuf. 630 * If NUMA support is disabled, create a single pool of mbuf in 631 * socket 0 memory by default. 632 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 633 * 634 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 635 * nb_txd can be configured at run time. 636 */ 637 if (param_total_num_mbufs) 638 nb_mbuf_per_pool = param_total_num_mbufs; 639 else { 640 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 641 (nb_lcores * mb_mempool_cache) + 642 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 643 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 644 } 645 646 if (numa_support) { 647 uint8_t i; 648 649 for (i = 0; i < num_sockets; i++) 650 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 651 socket_ids[i]); 652 } else { 653 if (socket_num == UMA_NO_CONFIG) 654 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 655 else 656 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 657 socket_num); 658 } 659 660 init_port_config(); 661 662 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 663 DEV_TX_OFFLOAD_GRE_TNL_TSO; 664 /* 665 * Records which Mbuf pool to use by each logical core, if needed. 666 */ 667 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 668 mbp = mbuf_pool_find( 669 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 670 671 if (mbp == NULL) 672 mbp = mbuf_pool_find(0); 673 fwd_lcores[lc_id]->mbp = mbp; 674 /* initialize GSO context */ 675 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 676 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 677 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 678 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 679 ETHER_CRC_LEN; 680 fwd_lcores[lc_id]->gso_ctx.flag = 0; 681 } 682 683 /* Configuration of packet forwarding streams. */ 684 if (init_fwd_streams() < 0) 685 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 686 687 fwd_config_setup(); 688 689 /* create a gro context for each lcore */ 690 gro_param.gro_types = RTE_GRO_TCP_IPV4; 691 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 692 gro_param.max_item_per_flow = MAX_PKT_BURST; 693 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 694 gro_param.socket_id = rte_lcore_to_socket_id( 695 fwd_lcores_cpuids[lc_id]); 696 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 697 if (fwd_lcores[lc_id]->gro_ctx == NULL) { 698 rte_exit(EXIT_FAILURE, 699 "rte_gro_ctx_create() failed\n"); 700 } 701 } 702 } 703 704 705 void 706 reconfig(portid_t new_port_id, unsigned socket_id) 707 { 708 struct rte_port *port; 709 710 /* Reconfiguration of Ethernet ports. */ 711 port = &ports[new_port_id]; 712 rte_eth_dev_info_get(new_port_id, &port->dev_info); 713 714 /* set flag to initialize port/queue */ 715 port->need_reconfig = 1; 716 port->need_reconfig_queues = 1; 717 port->socket_id = socket_id; 718 719 init_port_config(); 720 } 721 722 723 int 724 init_fwd_streams(void) 725 { 726 portid_t pid; 727 struct rte_port *port; 728 streamid_t sm_id, nb_fwd_streams_new; 729 queueid_t q; 730 731 /* set socket id according to numa or not */ 732 RTE_ETH_FOREACH_DEV(pid) { 733 port = &ports[pid]; 734 if (nb_rxq > port->dev_info.max_rx_queues) { 735 printf("Fail: nb_rxq(%d) is greater than " 736 "max_rx_queues(%d)\n", nb_rxq, 737 port->dev_info.max_rx_queues); 738 return -1; 739 } 740 if (nb_txq > port->dev_info.max_tx_queues) { 741 printf("Fail: nb_txq(%d) is greater than " 742 "max_tx_queues(%d)\n", nb_txq, 743 port->dev_info.max_tx_queues); 744 return -1; 745 } 746 if (numa_support) { 747 if (port_numa[pid] != NUMA_NO_CONFIG) 748 port->socket_id = port_numa[pid]; 749 else { 750 port->socket_id = rte_eth_dev_socket_id(pid); 751 752 /* if socket_id is invalid, set to 0 */ 753 if (check_socket_id(port->socket_id) < 0) 754 port->socket_id = 0; 755 } 756 } 757 else { 758 if (socket_num == UMA_NO_CONFIG) 759 port->socket_id = 0; 760 else 761 port->socket_id = socket_num; 762 } 763 } 764 765 q = RTE_MAX(nb_rxq, nb_txq); 766 if (q == 0) { 767 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 768 return -1; 769 } 770 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 771 if (nb_fwd_streams_new == nb_fwd_streams) 772 return 0; 773 /* clear the old */ 774 if (fwd_streams != NULL) { 775 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 776 if (fwd_streams[sm_id] == NULL) 777 continue; 778 rte_free(fwd_streams[sm_id]); 779 fwd_streams[sm_id] = NULL; 780 } 781 rte_free(fwd_streams); 782 fwd_streams = NULL; 783 } 784 785 /* init new */ 786 nb_fwd_streams = nb_fwd_streams_new; 787 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 788 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 789 if (fwd_streams == NULL) 790 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 791 "failed\n", nb_fwd_streams); 792 793 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 794 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 795 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 796 if (fwd_streams[sm_id] == NULL) 797 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 798 " failed\n"); 799 } 800 801 return 0; 802 } 803 804 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 805 static void 806 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 807 { 808 unsigned int total_burst; 809 unsigned int nb_burst; 810 unsigned int burst_stats[3]; 811 uint16_t pktnb_stats[3]; 812 uint16_t nb_pkt; 813 int burst_percent[3]; 814 815 /* 816 * First compute the total number of packet bursts and the 817 * two highest numbers of bursts of the same number of packets. 818 */ 819 total_burst = 0; 820 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 821 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 822 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 823 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 824 if (nb_burst == 0) 825 continue; 826 total_burst += nb_burst; 827 if (nb_burst > burst_stats[0]) { 828 burst_stats[1] = burst_stats[0]; 829 pktnb_stats[1] = pktnb_stats[0]; 830 burst_stats[0] = nb_burst; 831 pktnb_stats[0] = nb_pkt; 832 } 833 } 834 if (total_burst == 0) 835 return; 836 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 837 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 838 burst_percent[0], (int) pktnb_stats[0]); 839 if (burst_stats[0] == total_burst) { 840 printf("]\n"); 841 return; 842 } 843 if (burst_stats[0] + burst_stats[1] == total_burst) { 844 printf(" + %d%% of %d pkts]\n", 845 100 - burst_percent[0], pktnb_stats[1]); 846 return; 847 } 848 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 849 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 850 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 851 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 852 return; 853 } 854 printf(" + %d%% of %d pkts + %d%% of others]\n", 855 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 856 } 857 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 858 859 static void 860 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 861 { 862 struct rte_port *port; 863 uint8_t i; 864 865 static const char *fwd_stats_border = "----------------------"; 866 867 port = &ports[port_id]; 868 printf("\n %s Forward statistics for port %-2d %s\n", 869 fwd_stats_border, port_id, fwd_stats_border); 870 871 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 872 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 873 "%-"PRIu64"\n", 874 stats->ipackets, stats->imissed, 875 (uint64_t) (stats->ipackets + stats->imissed)); 876 877 if (cur_fwd_eng == &csum_fwd_engine) 878 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 879 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 880 if ((stats->ierrors + stats->rx_nombuf) > 0) { 881 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 882 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 883 } 884 885 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 886 "%-"PRIu64"\n", 887 stats->opackets, port->tx_dropped, 888 (uint64_t) (stats->opackets + port->tx_dropped)); 889 } 890 else { 891 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 892 "%14"PRIu64"\n", 893 stats->ipackets, stats->imissed, 894 (uint64_t) (stats->ipackets + stats->imissed)); 895 896 if (cur_fwd_eng == &csum_fwd_engine) 897 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 898 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 899 if ((stats->ierrors + stats->rx_nombuf) > 0) { 900 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 901 printf(" RX-nombufs: %14"PRIu64"\n", 902 stats->rx_nombuf); 903 } 904 905 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 906 "%14"PRIu64"\n", 907 stats->opackets, port->tx_dropped, 908 (uint64_t) (stats->opackets + port->tx_dropped)); 909 } 910 911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 912 if (port->rx_stream) 913 pkt_burst_stats_display("RX", 914 &port->rx_stream->rx_burst_stats); 915 if (port->tx_stream) 916 pkt_burst_stats_display("TX", 917 &port->tx_stream->tx_burst_stats); 918 #endif 919 920 if (port->rx_queue_stats_mapping_enabled) { 921 printf("\n"); 922 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 923 printf(" Stats reg %2d RX-packets:%14"PRIu64 924 " RX-errors:%14"PRIu64 925 " RX-bytes:%14"PRIu64"\n", 926 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 927 } 928 printf("\n"); 929 } 930 if (port->tx_queue_stats_mapping_enabled) { 931 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 932 printf(" Stats reg %2d TX-packets:%14"PRIu64 933 " TX-bytes:%14"PRIu64"\n", 934 i, stats->q_opackets[i], stats->q_obytes[i]); 935 } 936 } 937 938 printf(" %s--------------------------------%s\n", 939 fwd_stats_border, fwd_stats_border); 940 } 941 942 static void 943 fwd_stream_stats_display(streamid_t stream_id) 944 { 945 struct fwd_stream *fs; 946 static const char *fwd_top_stats_border = "-------"; 947 948 fs = fwd_streams[stream_id]; 949 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 950 (fs->fwd_dropped == 0)) 951 return; 952 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 953 "TX Port=%2d/Queue=%2d %s\n", 954 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 955 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 956 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 957 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 958 959 /* if checksum mode */ 960 if (cur_fwd_eng == &csum_fwd_engine) { 961 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 962 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 963 } 964 965 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 966 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 967 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 968 #endif 969 } 970 971 static void 972 flush_fwd_rx_queues(void) 973 { 974 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 975 portid_t rxp; 976 portid_t port_id; 977 queueid_t rxq; 978 uint16_t nb_rx; 979 uint16_t i; 980 uint8_t j; 981 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 982 uint64_t timer_period; 983 984 /* convert to number of cycles */ 985 timer_period = rte_get_timer_hz(); /* 1 second timeout */ 986 987 for (j = 0; j < 2; j++) { 988 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 989 for (rxq = 0; rxq < nb_rxq; rxq++) { 990 port_id = fwd_ports_ids[rxp]; 991 /** 992 * testpmd can stuck in the below do while loop 993 * if rte_eth_rx_burst() always returns nonzero 994 * packets. So timer is added to exit this loop 995 * after 1sec timer expiry. 996 */ 997 prev_tsc = rte_rdtsc(); 998 do { 999 nb_rx = rte_eth_rx_burst(port_id, rxq, 1000 pkts_burst, MAX_PKT_BURST); 1001 for (i = 0; i < nb_rx; i++) 1002 rte_pktmbuf_free(pkts_burst[i]); 1003 1004 cur_tsc = rte_rdtsc(); 1005 diff_tsc = cur_tsc - prev_tsc; 1006 timer_tsc += diff_tsc; 1007 } while ((nb_rx > 0) && 1008 (timer_tsc < timer_period)); 1009 timer_tsc = 0; 1010 } 1011 } 1012 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1013 } 1014 } 1015 1016 static void 1017 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1018 { 1019 struct fwd_stream **fsm; 1020 streamid_t nb_fs; 1021 streamid_t sm_id; 1022 #ifdef RTE_LIBRTE_BITRATE 1023 uint64_t tics_per_1sec; 1024 uint64_t tics_datum; 1025 uint64_t tics_current; 1026 uint8_t idx_port, cnt_ports; 1027 1028 cnt_ports = rte_eth_dev_count(); 1029 tics_datum = rte_rdtsc(); 1030 tics_per_1sec = rte_get_timer_hz(); 1031 #endif 1032 fsm = &fwd_streams[fc->stream_idx]; 1033 nb_fs = fc->stream_nb; 1034 do { 1035 for (sm_id = 0; sm_id < nb_fs; sm_id++) 1036 (*pkt_fwd)(fsm[sm_id]); 1037 #ifdef RTE_LIBRTE_BITRATE 1038 if (bitrate_enabled != 0 && 1039 bitrate_lcore_id == rte_lcore_id()) { 1040 tics_current = rte_rdtsc(); 1041 if (tics_current - tics_datum >= tics_per_1sec) { 1042 /* Periodic bitrate calculation */ 1043 for (idx_port = 0; 1044 idx_port < cnt_ports; 1045 idx_port++) 1046 rte_stats_bitrate_calc(bitrate_data, 1047 idx_port); 1048 tics_datum = tics_current; 1049 } 1050 } 1051 #endif 1052 #ifdef RTE_LIBRTE_LATENCY_STATS 1053 if (latencystats_enabled != 0 && 1054 latencystats_lcore_id == rte_lcore_id()) 1055 rte_latencystats_update(); 1056 #endif 1057 1058 } while (! fc->stopped); 1059 } 1060 1061 static int 1062 start_pkt_forward_on_core(void *fwd_arg) 1063 { 1064 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1065 cur_fwd_config.fwd_eng->packet_fwd); 1066 return 0; 1067 } 1068 1069 /* 1070 * Run the TXONLY packet forwarding engine to send a single burst of packets. 1071 * Used to start communication flows in network loopback test configurations. 1072 */ 1073 static int 1074 run_one_txonly_burst_on_core(void *fwd_arg) 1075 { 1076 struct fwd_lcore *fwd_lc; 1077 struct fwd_lcore tmp_lcore; 1078 1079 fwd_lc = (struct fwd_lcore *) fwd_arg; 1080 tmp_lcore = *fwd_lc; 1081 tmp_lcore.stopped = 1; 1082 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1083 return 0; 1084 } 1085 1086 /* 1087 * Launch packet forwarding: 1088 * - Setup per-port forwarding context. 1089 * - launch logical cores with their forwarding configuration. 1090 */ 1091 static void 1092 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1093 { 1094 port_fwd_begin_t port_fwd_begin; 1095 unsigned int i; 1096 unsigned int lc_id; 1097 int diag; 1098 1099 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1100 if (port_fwd_begin != NULL) { 1101 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1102 (*port_fwd_begin)(fwd_ports_ids[i]); 1103 } 1104 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1105 lc_id = fwd_lcores_cpuids[i]; 1106 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1107 fwd_lcores[i]->stopped = 0; 1108 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1109 fwd_lcores[i], lc_id); 1110 if (diag != 0) 1111 printf("launch lcore %u failed - diag=%d\n", 1112 lc_id, diag); 1113 } 1114 } 1115 } 1116 1117 /* 1118 * Launch packet forwarding configuration. 1119 */ 1120 void 1121 start_packet_forwarding(int with_tx_first) 1122 { 1123 port_fwd_begin_t port_fwd_begin; 1124 port_fwd_end_t port_fwd_end; 1125 struct rte_port *port; 1126 unsigned int i; 1127 portid_t pt_id; 1128 streamid_t sm_id; 1129 1130 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1131 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1132 1133 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1134 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1135 1136 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1137 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1138 (!nb_rxq || !nb_txq)) 1139 rte_exit(EXIT_FAILURE, 1140 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1141 cur_fwd_eng->fwd_mode_name); 1142 1143 if (all_ports_started() == 0) { 1144 printf("Not all ports were started\n"); 1145 return; 1146 } 1147 if (test_done == 0) { 1148 printf("Packet forwarding already started\n"); 1149 return; 1150 } 1151 1152 if (init_fwd_streams() < 0) { 1153 printf("Fail from init_fwd_streams()\n"); 1154 return; 1155 } 1156 1157 if(dcb_test) { 1158 for (i = 0; i < nb_fwd_ports; i++) { 1159 pt_id = fwd_ports_ids[i]; 1160 port = &ports[pt_id]; 1161 if (!port->dcb_flag) { 1162 printf("In DCB mode, all forwarding ports must " 1163 "be configured in this mode.\n"); 1164 return; 1165 } 1166 } 1167 if (nb_fwd_lcores == 1) { 1168 printf("In DCB mode,the nb forwarding cores " 1169 "should be larger than 1.\n"); 1170 return; 1171 } 1172 } 1173 test_done = 0; 1174 1175 if(!no_flush_rx) 1176 flush_fwd_rx_queues(); 1177 1178 fwd_config_setup(); 1179 pkt_fwd_config_display(&cur_fwd_config); 1180 rxtx_config_display(); 1181 1182 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1183 pt_id = fwd_ports_ids[i]; 1184 port = &ports[pt_id]; 1185 rte_eth_stats_get(pt_id, &port->stats); 1186 port->tx_dropped = 0; 1187 1188 map_port_queue_stats_mapping_registers(pt_id, port); 1189 } 1190 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1191 fwd_streams[sm_id]->rx_packets = 0; 1192 fwd_streams[sm_id]->tx_packets = 0; 1193 fwd_streams[sm_id]->fwd_dropped = 0; 1194 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1195 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1196 1197 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1198 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1199 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1200 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1201 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1202 #endif 1203 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1204 fwd_streams[sm_id]->core_cycles = 0; 1205 #endif 1206 } 1207 if (with_tx_first) { 1208 port_fwd_begin = tx_only_engine.port_fwd_begin; 1209 if (port_fwd_begin != NULL) { 1210 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1211 (*port_fwd_begin)(fwd_ports_ids[i]); 1212 } 1213 while (with_tx_first--) { 1214 launch_packet_forwarding( 1215 run_one_txonly_burst_on_core); 1216 rte_eal_mp_wait_lcore(); 1217 } 1218 port_fwd_end = tx_only_engine.port_fwd_end; 1219 if (port_fwd_end != NULL) { 1220 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1221 (*port_fwd_end)(fwd_ports_ids[i]); 1222 } 1223 } 1224 launch_packet_forwarding(start_pkt_forward_on_core); 1225 } 1226 1227 void 1228 stop_packet_forwarding(void) 1229 { 1230 struct rte_eth_stats stats; 1231 struct rte_port *port; 1232 port_fwd_end_t port_fwd_end; 1233 int i; 1234 portid_t pt_id; 1235 streamid_t sm_id; 1236 lcoreid_t lc_id; 1237 uint64_t total_recv; 1238 uint64_t total_xmit; 1239 uint64_t total_rx_dropped; 1240 uint64_t total_tx_dropped; 1241 uint64_t total_rx_nombuf; 1242 uint64_t tx_dropped; 1243 uint64_t rx_bad_ip_csum; 1244 uint64_t rx_bad_l4_csum; 1245 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1246 uint64_t fwd_cycles; 1247 #endif 1248 1249 static const char *acc_stats_border = "+++++++++++++++"; 1250 1251 if (test_done) { 1252 printf("Packet forwarding not started\n"); 1253 return; 1254 } 1255 printf("Telling cores to stop..."); 1256 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1257 fwd_lcores[lc_id]->stopped = 1; 1258 printf("\nWaiting for lcores to finish...\n"); 1259 rte_eal_mp_wait_lcore(); 1260 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1261 if (port_fwd_end != NULL) { 1262 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1263 pt_id = fwd_ports_ids[i]; 1264 (*port_fwd_end)(pt_id); 1265 } 1266 } 1267 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1268 fwd_cycles = 0; 1269 #endif 1270 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1271 if (cur_fwd_config.nb_fwd_streams > 1272 cur_fwd_config.nb_fwd_ports) { 1273 fwd_stream_stats_display(sm_id); 1274 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1275 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1276 } else { 1277 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1278 fwd_streams[sm_id]; 1279 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1280 fwd_streams[sm_id]; 1281 } 1282 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1283 tx_dropped = (uint64_t) (tx_dropped + 1284 fwd_streams[sm_id]->fwd_dropped); 1285 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1286 1287 rx_bad_ip_csum = 1288 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1289 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1290 fwd_streams[sm_id]->rx_bad_ip_csum); 1291 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1292 rx_bad_ip_csum; 1293 1294 rx_bad_l4_csum = 1295 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1296 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1297 fwd_streams[sm_id]->rx_bad_l4_csum); 1298 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1299 rx_bad_l4_csum; 1300 1301 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1302 fwd_cycles = (uint64_t) (fwd_cycles + 1303 fwd_streams[sm_id]->core_cycles); 1304 #endif 1305 } 1306 total_recv = 0; 1307 total_xmit = 0; 1308 total_rx_dropped = 0; 1309 total_tx_dropped = 0; 1310 total_rx_nombuf = 0; 1311 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1312 pt_id = fwd_ports_ids[i]; 1313 1314 port = &ports[pt_id]; 1315 rte_eth_stats_get(pt_id, &stats); 1316 stats.ipackets -= port->stats.ipackets; 1317 port->stats.ipackets = 0; 1318 stats.opackets -= port->stats.opackets; 1319 port->stats.opackets = 0; 1320 stats.ibytes -= port->stats.ibytes; 1321 port->stats.ibytes = 0; 1322 stats.obytes -= port->stats.obytes; 1323 port->stats.obytes = 0; 1324 stats.imissed -= port->stats.imissed; 1325 port->stats.imissed = 0; 1326 stats.oerrors -= port->stats.oerrors; 1327 port->stats.oerrors = 0; 1328 stats.rx_nombuf -= port->stats.rx_nombuf; 1329 port->stats.rx_nombuf = 0; 1330 1331 total_recv += stats.ipackets; 1332 total_xmit += stats.opackets; 1333 total_rx_dropped += stats.imissed; 1334 total_tx_dropped += port->tx_dropped; 1335 total_rx_nombuf += stats.rx_nombuf; 1336 1337 fwd_port_stats_display(pt_id, &stats); 1338 } 1339 1340 printf("\n %s Accumulated forward statistics for all ports" 1341 "%s\n", 1342 acc_stats_border, acc_stats_border); 1343 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1344 "%-"PRIu64"\n" 1345 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1346 "%-"PRIu64"\n", 1347 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1348 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1349 if (total_rx_nombuf > 0) 1350 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1351 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1352 "%s\n", 1353 acc_stats_border, acc_stats_border); 1354 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1355 if (total_recv > 0) 1356 printf("\n CPU cycles/packet=%u (total cycles=" 1357 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1358 (unsigned int)(fwd_cycles / total_recv), 1359 fwd_cycles, total_recv); 1360 #endif 1361 printf("\nDone.\n"); 1362 test_done = 1; 1363 } 1364 1365 void 1366 dev_set_link_up(portid_t pid) 1367 { 1368 if (rte_eth_dev_set_link_up(pid) < 0) 1369 printf("\nSet link up fail.\n"); 1370 } 1371 1372 void 1373 dev_set_link_down(portid_t pid) 1374 { 1375 if (rte_eth_dev_set_link_down(pid) < 0) 1376 printf("\nSet link down fail.\n"); 1377 } 1378 1379 static int 1380 all_ports_started(void) 1381 { 1382 portid_t pi; 1383 struct rte_port *port; 1384 1385 RTE_ETH_FOREACH_DEV(pi) { 1386 port = &ports[pi]; 1387 /* Check if there is a port which is not started */ 1388 if ((port->port_status != RTE_PORT_STARTED) && 1389 (port->slave_flag == 0)) 1390 return 0; 1391 } 1392 1393 /* No port is not started */ 1394 return 1; 1395 } 1396 1397 int 1398 all_ports_stopped(void) 1399 { 1400 portid_t pi; 1401 struct rte_port *port; 1402 1403 RTE_ETH_FOREACH_DEV(pi) { 1404 port = &ports[pi]; 1405 if ((port->port_status != RTE_PORT_STOPPED) && 1406 (port->slave_flag == 0)) 1407 return 0; 1408 } 1409 1410 return 1; 1411 } 1412 1413 int 1414 port_is_started(portid_t port_id) 1415 { 1416 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1417 return 0; 1418 1419 if (ports[port_id].port_status != RTE_PORT_STARTED) 1420 return 0; 1421 1422 return 1; 1423 } 1424 1425 static int 1426 port_is_closed(portid_t port_id) 1427 { 1428 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1429 return 0; 1430 1431 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1432 return 0; 1433 1434 return 1; 1435 } 1436 1437 int 1438 start_port(portid_t pid) 1439 { 1440 int diag, need_check_link_status = -1; 1441 portid_t pi; 1442 queueid_t qi; 1443 struct rte_port *port; 1444 struct ether_addr mac_addr; 1445 enum rte_eth_event_type event_type; 1446 1447 if (port_id_is_invalid(pid, ENABLED_WARN)) 1448 return 0; 1449 1450 if(dcb_config) 1451 dcb_test = 1; 1452 RTE_ETH_FOREACH_DEV(pi) { 1453 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1454 continue; 1455 1456 need_check_link_status = 0; 1457 port = &ports[pi]; 1458 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1459 RTE_PORT_HANDLING) == 0) { 1460 printf("Port %d is now not stopped\n", pi); 1461 continue; 1462 } 1463 1464 if (port->need_reconfig > 0) { 1465 port->need_reconfig = 0; 1466 1467 if (flow_isolate_all) { 1468 int ret = port_flow_isolate(pi, 1); 1469 if (ret) { 1470 printf("Failed to apply isolated" 1471 " mode on port %d\n", pi); 1472 return -1; 1473 } 1474 } 1475 1476 printf("Configuring Port %d (socket %u)\n", pi, 1477 port->socket_id); 1478 /* configure port */ 1479 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1480 &(port->dev_conf)); 1481 if (diag != 0) { 1482 if (rte_atomic16_cmpset(&(port->port_status), 1483 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1484 printf("Port %d can not be set back " 1485 "to stopped\n", pi); 1486 printf("Fail to configure port %d\n", pi); 1487 /* try to reconfigure port next time */ 1488 port->need_reconfig = 1; 1489 return -1; 1490 } 1491 } 1492 if (port->need_reconfig_queues > 0) { 1493 port->need_reconfig_queues = 0; 1494 /* setup tx queues */ 1495 for (qi = 0; qi < nb_txq; qi++) { 1496 if ((numa_support) && 1497 (txring_numa[pi] != NUMA_NO_CONFIG)) 1498 diag = rte_eth_tx_queue_setup(pi, qi, 1499 nb_txd,txring_numa[pi], 1500 &(port->tx_conf)); 1501 else 1502 diag = rte_eth_tx_queue_setup(pi, qi, 1503 nb_txd,port->socket_id, 1504 &(port->tx_conf)); 1505 1506 if (diag == 0) 1507 continue; 1508 1509 /* Fail to setup tx queue, return */ 1510 if (rte_atomic16_cmpset(&(port->port_status), 1511 RTE_PORT_HANDLING, 1512 RTE_PORT_STOPPED) == 0) 1513 printf("Port %d can not be set back " 1514 "to stopped\n", pi); 1515 printf("Fail to configure port %d tx queues\n", pi); 1516 /* try to reconfigure queues next time */ 1517 port->need_reconfig_queues = 1; 1518 return -1; 1519 } 1520 /* setup rx queues */ 1521 for (qi = 0; qi < nb_rxq; qi++) { 1522 if ((numa_support) && 1523 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1524 struct rte_mempool * mp = 1525 mbuf_pool_find(rxring_numa[pi]); 1526 if (mp == NULL) { 1527 printf("Failed to setup RX queue:" 1528 "No mempool allocation" 1529 " on the socket %d\n", 1530 rxring_numa[pi]); 1531 return -1; 1532 } 1533 1534 diag = rte_eth_rx_queue_setup(pi, qi, 1535 nb_rxd,rxring_numa[pi], 1536 &(port->rx_conf),mp); 1537 } else { 1538 struct rte_mempool *mp = 1539 mbuf_pool_find(port->socket_id); 1540 if (mp == NULL) { 1541 printf("Failed to setup RX queue:" 1542 "No mempool allocation" 1543 " on the socket %d\n", 1544 port->socket_id); 1545 return -1; 1546 } 1547 diag = rte_eth_rx_queue_setup(pi, qi, 1548 nb_rxd,port->socket_id, 1549 &(port->rx_conf), mp); 1550 } 1551 if (diag == 0) 1552 continue; 1553 1554 /* Fail to setup rx queue, return */ 1555 if (rte_atomic16_cmpset(&(port->port_status), 1556 RTE_PORT_HANDLING, 1557 RTE_PORT_STOPPED) == 0) 1558 printf("Port %d can not be set back " 1559 "to stopped\n", pi); 1560 printf("Fail to configure port %d rx queues\n", pi); 1561 /* try to reconfigure queues next time */ 1562 port->need_reconfig_queues = 1; 1563 return -1; 1564 } 1565 } 1566 1567 for (event_type = RTE_ETH_EVENT_UNKNOWN; 1568 event_type < RTE_ETH_EVENT_MAX; 1569 event_type++) { 1570 diag = rte_eth_dev_callback_register(pi, 1571 event_type, 1572 eth_event_callback, 1573 NULL); 1574 if (diag) { 1575 printf("Failed to setup even callback for event %d\n", 1576 event_type); 1577 return -1; 1578 } 1579 } 1580 1581 /* start port */ 1582 if (rte_eth_dev_start(pi) < 0) { 1583 printf("Fail to start port %d\n", pi); 1584 1585 /* Fail to setup rx queue, return */ 1586 if (rte_atomic16_cmpset(&(port->port_status), 1587 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1588 printf("Port %d can not be set back to " 1589 "stopped\n", pi); 1590 continue; 1591 } 1592 1593 if (rte_atomic16_cmpset(&(port->port_status), 1594 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1595 printf("Port %d can not be set into started\n", pi); 1596 1597 rte_eth_macaddr_get(pi, &mac_addr); 1598 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1599 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1600 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1601 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1602 1603 /* at least one port started, need checking link status */ 1604 need_check_link_status = 1; 1605 } 1606 1607 if (need_check_link_status == 1 && !no_link_check) 1608 check_all_ports_link_status(RTE_PORT_ALL); 1609 else if (need_check_link_status == 0) 1610 printf("Please stop the ports first\n"); 1611 1612 printf("Done\n"); 1613 return 0; 1614 } 1615 1616 void 1617 stop_port(portid_t pid) 1618 { 1619 portid_t pi; 1620 struct rte_port *port; 1621 int need_check_link_status = 0; 1622 1623 if (dcb_test) { 1624 dcb_test = 0; 1625 dcb_config = 0; 1626 } 1627 1628 if (port_id_is_invalid(pid, ENABLED_WARN)) 1629 return; 1630 1631 printf("Stopping ports...\n"); 1632 1633 RTE_ETH_FOREACH_DEV(pi) { 1634 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1635 continue; 1636 1637 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1638 printf("Please remove port %d from forwarding configuration.\n", pi); 1639 continue; 1640 } 1641 1642 if (port_is_bonding_slave(pi)) { 1643 printf("Please remove port %d from bonded device.\n", pi); 1644 continue; 1645 } 1646 1647 port = &ports[pi]; 1648 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1649 RTE_PORT_HANDLING) == 0) 1650 continue; 1651 1652 rte_eth_dev_stop(pi); 1653 1654 if (rte_atomic16_cmpset(&(port->port_status), 1655 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1656 printf("Port %d can not be set into stopped\n", pi); 1657 need_check_link_status = 1; 1658 } 1659 if (need_check_link_status && !no_link_check) 1660 check_all_ports_link_status(RTE_PORT_ALL); 1661 1662 printf("Done\n"); 1663 } 1664 1665 void 1666 close_port(portid_t pid) 1667 { 1668 portid_t pi; 1669 struct rte_port *port; 1670 1671 if (port_id_is_invalid(pid, ENABLED_WARN)) 1672 return; 1673 1674 printf("Closing ports...\n"); 1675 1676 RTE_ETH_FOREACH_DEV(pi) { 1677 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1678 continue; 1679 1680 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1681 printf("Please remove port %d from forwarding configuration.\n", pi); 1682 continue; 1683 } 1684 1685 if (port_is_bonding_slave(pi)) { 1686 printf("Please remove port %d from bonded device.\n", pi); 1687 continue; 1688 } 1689 1690 port = &ports[pi]; 1691 if (rte_atomic16_cmpset(&(port->port_status), 1692 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1693 printf("Port %d is already closed\n", pi); 1694 continue; 1695 } 1696 1697 if (rte_atomic16_cmpset(&(port->port_status), 1698 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1699 printf("Port %d is now not stopped\n", pi); 1700 continue; 1701 } 1702 1703 if (port->flow_list) 1704 port_flow_flush(pi); 1705 rte_eth_dev_close(pi); 1706 1707 if (rte_atomic16_cmpset(&(port->port_status), 1708 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1709 printf("Port %d cannot be set to closed\n", pi); 1710 } 1711 1712 printf("Done\n"); 1713 } 1714 1715 void 1716 reset_port(portid_t pid) 1717 { 1718 int diag; 1719 portid_t pi; 1720 struct rte_port *port; 1721 1722 if (port_id_is_invalid(pid, ENABLED_WARN)) 1723 return; 1724 1725 printf("Resetting ports...\n"); 1726 1727 RTE_ETH_FOREACH_DEV(pi) { 1728 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1729 continue; 1730 1731 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1732 printf("Please remove port %d from forwarding " 1733 "configuration.\n", pi); 1734 continue; 1735 } 1736 1737 if (port_is_bonding_slave(pi)) { 1738 printf("Please remove port %d from bonded device.\n", 1739 pi); 1740 continue; 1741 } 1742 1743 diag = rte_eth_dev_reset(pi); 1744 if (diag == 0) { 1745 port = &ports[pi]; 1746 port->need_reconfig = 1; 1747 port->need_reconfig_queues = 1; 1748 } else { 1749 printf("Failed to reset port %d. diag=%d\n", pi, diag); 1750 } 1751 } 1752 1753 printf("Done\n"); 1754 } 1755 1756 void 1757 attach_port(char *identifier) 1758 { 1759 portid_t pi = 0; 1760 unsigned int socket_id; 1761 1762 printf("Attaching a new port...\n"); 1763 1764 if (identifier == NULL) { 1765 printf("Invalid parameters are specified\n"); 1766 return; 1767 } 1768 1769 if (rte_eth_dev_attach(identifier, &pi)) 1770 return; 1771 1772 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1773 /* if socket_id is invalid, set to 0 */ 1774 if (check_socket_id(socket_id) < 0) 1775 socket_id = 0; 1776 reconfig(pi, socket_id); 1777 rte_eth_promiscuous_enable(pi); 1778 1779 nb_ports = rte_eth_dev_count(); 1780 1781 ports[pi].port_status = RTE_PORT_STOPPED; 1782 1783 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1784 printf("Done\n"); 1785 } 1786 1787 void 1788 detach_port(portid_t port_id) 1789 { 1790 char name[RTE_ETH_NAME_MAX_LEN]; 1791 1792 printf("Detaching a port...\n"); 1793 1794 if (!port_is_closed(port_id)) { 1795 printf("Please close port first\n"); 1796 return; 1797 } 1798 1799 if (ports[port_id].flow_list) 1800 port_flow_flush(port_id); 1801 1802 if (rte_eth_dev_detach(port_id, name)) { 1803 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name); 1804 return; 1805 } 1806 1807 nb_ports = rte_eth_dev_count(); 1808 1809 printf("Port '%s' is detached. Now total ports is %d\n", 1810 name, nb_ports); 1811 printf("Done\n"); 1812 return; 1813 } 1814 1815 void 1816 pmd_test_exit(void) 1817 { 1818 portid_t pt_id; 1819 1820 if (test_done == 0) 1821 stop_packet_forwarding(); 1822 1823 if (ports != NULL) { 1824 no_link_check = 1; 1825 RTE_ETH_FOREACH_DEV(pt_id) { 1826 printf("\nShutting down port %d...\n", pt_id); 1827 fflush(stdout); 1828 stop_port(pt_id); 1829 close_port(pt_id); 1830 } 1831 } 1832 printf("\nBye...\n"); 1833 } 1834 1835 typedef void (*cmd_func_t)(void); 1836 struct pmd_test_command { 1837 const char *cmd_name; 1838 cmd_func_t cmd_func; 1839 }; 1840 1841 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1842 1843 /* Check the link status of all ports in up to 9s, and print them finally */ 1844 static void 1845 check_all_ports_link_status(uint32_t port_mask) 1846 { 1847 #define CHECK_INTERVAL 100 /* 100ms */ 1848 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1849 portid_t portid; 1850 uint8_t count, all_ports_up, print_flag = 0; 1851 struct rte_eth_link link; 1852 1853 printf("Checking link statuses...\n"); 1854 fflush(stdout); 1855 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1856 all_ports_up = 1; 1857 RTE_ETH_FOREACH_DEV(portid) { 1858 if ((port_mask & (1 << portid)) == 0) 1859 continue; 1860 memset(&link, 0, sizeof(link)); 1861 rte_eth_link_get_nowait(portid, &link); 1862 /* print link status if flag set */ 1863 if (print_flag == 1) { 1864 if (link.link_status) 1865 printf( 1866 "Port%d Link Up. speed %u Mbps- %s\n", 1867 portid, link.link_speed, 1868 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1869 ("full-duplex") : ("half-duplex\n")); 1870 else 1871 printf("Port %d Link Down\n", portid); 1872 continue; 1873 } 1874 /* clear all_ports_up flag if any link down */ 1875 if (link.link_status == ETH_LINK_DOWN) { 1876 all_ports_up = 0; 1877 break; 1878 } 1879 } 1880 /* after finally printing all link status, get out */ 1881 if (print_flag == 1) 1882 break; 1883 1884 if (all_ports_up == 0) { 1885 fflush(stdout); 1886 rte_delay_ms(CHECK_INTERVAL); 1887 } 1888 1889 /* set the print_flag if all ports up or timeout */ 1890 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1891 print_flag = 1; 1892 } 1893 1894 if (lsc_interrupt) 1895 break; 1896 } 1897 } 1898 1899 static void 1900 rmv_event_callback(void *arg) 1901 { 1902 struct rte_eth_dev *dev; 1903 portid_t port_id = (intptr_t)arg; 1904 1905 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1906 dev = &rte_eth_devices[port_id]; 1907 1908 stop_port(port_id); 1909 close_port(port_id); 1910 printf("removing device %s\n", dev->device->name); 1911 if (rte_eal_dev_detach(dev->device)) 1912 RTE_LOG(ERR, USER1, "Failed to detach device %s\n", 1913 dev->device->name); 1914 } 1915 1916 /* This function is used by the interrupt thread */ 1917 static int 1918 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 1919 void *ret_param) 1920 { 1921 static const char * const event_desc[] = { 1922 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 1923 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 1924 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 1925 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 1926 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 1927 [RTE_ETH_EVENT_MACSEC] = "MACsec", 1928 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 1929 [RTE_ETH_EVENT_MAX] = NULL, 1930 }; 1931 1932 RTE_SET_USED(param); 1933 RTE_SET_USED(ret_param); 1934 1935 if (type >= RTE_ETH_EVENT_MAX) { 1936 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 1937 port_id, __func__, type); 1938 fflush(stderr); 1939 } else if (event_print_mask & (UINT32_C(1) << type)) { 1940 printf("\nPort %" PRIu8 ": %s event\n", port_id, 1941 event_desc[type]); 1942 fflush(stdout); 1943 } 1944 1945 switch (type) { 1946 case RTE_ETH_EVENT_INTR_RMV: 1947 if (rte_eal_alarm_set(100000, 1948 rmv_event_callback, (void *)(intptr_t)port_id)) 1949 fprintf(stderr, "Could not set up deferred device removal\n"); 1950 break; 1951 default: 1952 break; 1953 } 1954 return 0; 1955 } 1956 1957 static int 1958 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1959 { 1960 uint16_t i; 1961 int diag; 1962 uint8_t mapping_found = 0; 1963 1964 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1965 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1966 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1967 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1968 tx_queue_stats_mappings[i].queue_id, 1969 tx_queue_stats_mappings[i].stats_counter_id); 1970 if (diag != 0) 1971 return diag; 1972 mapping_found = 1; 1973 } 1974 } 1975 if (mapping_found) 1976 port->tx_queue_stats_mapping_enabled = 1; 1977 return 0; 1978 } 1979 1980 static int 1981 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1982 { 1983 uint16_t i; 1984 int diag; 1985 uint8_t mapping_found = 0; 1986 1987 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1988 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1989 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1990 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1991 rx_queue_stats_mappings[i].queue_id, 1992 rx_queue_stats_mappings[i].stats_counter_id); 1993 if (diag != 0) 1994 return diag; 1995 mapping_found = 1; 1996 } 1997 } 1998 if (mapping_found) 1999 port->rx_queue_stats_mapping_enabled = 1; 2000 return 0; 2001 } 2002 2003 static void 2004 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2005 { 2006 int diag = 0; 2007 2008 diag = set_tx_queue_stats_mapping_registers(pi, port); 2009 if (diag != 0) { 2010 if (diag == -ENOTSUP) { 2011 port->tx_queue_stats_mapping_enabled = 0; 2012 printf("TX queue stats mapping not supported port id=%d\n", pi); 2013 } 2014 else 2015 rte_exit(EXIT_FAILURE, 2016 "set_tx_queue_stats_mapping_registers " 2017 "failed for port id=%d diag=%d\n", 2018 pi, diag); 2019 } 2020 2021 diag = set_rx_queue_stats_mapping_registers(pi, port); 2022 if (diag != 0) { 2023 if (diag == -ENOTSUP) { 2024 port->rx_queue_stats_mapping_enabled = 0; 2025 printf("RX queue stats mapping not supported port id=%d\n", pi); 2026 } 2027 else 2028 rte_exit(EXIT_FAILURE, 2029 "set_rx_queue_stats_mapping_registers " 2030 "failed for port id=%d diag=%d\n", 2031 pi, diag); 2032 } 2033 } 2034 2035 static void 2036 rxtx_port_config(struct rte_port *port) 2037 { 2038 port->rx_conf = port->dev_info.default_rxconf; 2039 port->tx_conf = port->dev_info.default_txconf; 2040 2041 /* Check if any RX/TX parameters have been passed */ 2042 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2043 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2044 2045 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2046 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2047 2048 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2049 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2050 2051 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2052 port->rx_conf.rx_free_thresh = rx_free_thresh; 2053 2054 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2055 port->rx_conf.rx_drop_en = rx_drop_en; 2056 2057 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2058 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2059 2060 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2061 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2062 2063 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2064 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2065 2066 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2067 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2068 2069 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2070 port->tx_conf.tx_free_thresh = tx_free_thresh; 2071 2072 if (txq_flags != RTE_PMD_PARAM_UNSET) 2073 port->tx_conf.txq_flags = txq_flags; 2074 } 2075 2076 void 2077 init_port_config(void) 2078 { 2079 portid_t pid; 2080 struct rte_port *port; 2081 2082 RTE_ETH_FOREACH_DEV(pid) { 2083 port = &ports[pid]; 2084 port->dev_conf.rxmode = rx_mode; 2085 port->dev_conf.fdir_conf = fdir_conf; 2086 if (nb_rxq > 1) { 2087 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2088 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2089 } else { 2090 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2091 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2092 } 2093 2094 if (port->dcb_flag == 0) { 2095 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 2096 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 2097 else 2098 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 2099 } 2100 2101 rxtx_port_config(port); 2102 2103 rte_eth_macaddr_get(pid, &port->eth_addr); 2104 2105 map_port_queue_stats_mapping_registers(pid, port); 2106 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2107 rte_pmd_ixgbe_bypass_init(pid); 2108 #endif 2109 2110 if (lsc_interrupt && 2111 (rte_eth_devices[pid].data->dev_flags & 2112 RTE_ETH_DEV_INTR_LSC)) 2113 port->dev_conf.intr_conf.lsc = 1; 2114 if (rmv_interrupt && 2115 (rte_eth_devices[pid].data->dev_flags & 2116 RTE_ETH_DEV_INTR_RMV)) 2117 port->dev_conf.intr_conf.rmv = 1; 2118 2119 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 2120 /* Detect softnic port */ 2121 if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 2122 port->softnic_enable = 1; 2123 memset(&port->softport, 0, sizeof(struct softnic_port)); 2124 2125 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 2126 port->softport.tm_flag = 1; 2127 } 2128 #endif 2129 } 2130 } 2131 2132 void set_port_slave_flag(portid_t slave_pid) 2133 { 2134 struct rte_port *port; 2135 2136 port = &ports[slave_pid]; 2137 port->slave_flag = 1; 2138 } 2139 2140 void clear_port_slave_flag(portid_t slave_pid) 2141 { 2142 struct rte_port *port; 2143 2144 port = &ports[slave_pid]; 2145 port->slave_flag = 0; 2146 } 2147 2148 uint8_t port_is_bonding_slave(portid_t slave_pid) 2149 { 2150 struct rte_port *port; 2151 2152 port = &ports[slave_pid]; 2153 return port->slave_flag; 2154 } 2155 2156 const uint16_t vlan_tags[] = { 2157 0, 1, 2, 3, 4, 5, 6, 7, 2158 8, 9, 10, 11, 12, 13, 14, 15, 2159 16, 17, 18, 19, 20, 21, 22, 23, 2160 24, 25, 26, 27, 28, 29, 30, 31 2161 }; 2162 2163 static int 2164 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 2165 enum dcb_mode_enable dcb_mode, 2166 enum rte_eth_nb_tcs num_tcs, 2167 uint8_t pfc_en) 2168 { 2169 uint8_t i; 2170 2171 /* 2172 * Builds up the correct configuration for dcb+vt based on the vlan tags array 2173 * given above, and the number of traffic classes available for use. 2174 */ 2175 if (dcb_mode == DCB_VT_ENABLED) { 2176 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 2177 ð_conf->rx_adv_conf.vmdq_dcb_conf; 2178 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 2179 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2180 2181 /* VMDQ+DCB RX and TX configurations */ 2182 vmdq_rx_conf->enable_default_pool = 0; 2183 vmdq_rx_conf->default_pool = 0; 2184 vmdq_rx_conf->nb_queue_pools = 2185 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2186 vmdq_tx_conf->nb_queue_pools = 2187 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2188 2189 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 2190 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 2191 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 2192 vmdq_rx_conf->pool_map[i].pools = 2193 1 << (i % vmdq_rx_conf->nb_queue_pools); 2194 } 2195 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2196 vmdq_rx_conf->dcb_tc[i] = i; 2197 vmdq_tx_conf->dcb_tc[i] = i; 2198 } 2199 2200 /* set DCB mode of RX and TX of multiple queues */ 2201 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 2202 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2203 } else { 2204 struct rte_eth_dcb_rx_conf *rx_conf = 2205 ð_conf->rx_adv_conf.dcb_rx_conf; 2206 struct rte_eth_dcb_tx_conf *tx_conf = 2207 ð_conf->tx_adv_conf.dcb_tx_conf; 2208 2209 rx_conf->nb_tcs = num_tcs; 2210 tx_conf->nb_tcs = num_tcs; 2211 2212 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2213 rx_conf->dcb_tc[i] = i % num_tcs; 2214 tx_conf->dcb_tc[i] = i % num_tcs; 2215 } 2216 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2217 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 2218 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 2219 } 2220 2221 if (pfc_en) 2222 eth_conf->dcb_capability_en = 2223 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2224 else 2225 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2226 2227 return 0; 2228 } 2229 2230 int 2231 init_port_dcb_config(portid_t pid, 2232 enum dcb_mode_enable dcb_mode, 2233 enum rte_eth_nb_tcs num_tcs, 2234 uint8_t pfc_en) 2235 { 2236 struct rte_eth_conf port_conf; 2237 struct rte_port *rte_port; 2238 int retval; 2239 uint16_t i; 2240 2241 rte_port = &ports[pid]; 2242 2243 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2244 /* Enter DCB configuration status */ 2245 dcb_config = 1; 2246 2247 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2248 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2249 if (retval < 0) 2250 return retval; 2251 port_conf.rxmode.hw_vlan_filter = 1; 2252 2253 /** 2254 * Write the configuration into the device. 2255 * Set the numbers of RX & TX queues to 0, so 2256 * the RX & TX queues will not be setup. 2257 */ 2258 rte_eth_dev_configure(pid, 0, 0, &port_conf); 2259 2260 rte_eth_dev_info_get(pid, &rte_port->dev_info); 2261 2262 /* If dev_info.vmdq_pool_base is greater than 0, 2263 * the queue id of vmdq pools is started after pf queues. 2264 */ 2265 if (dcb_mode == DCB_VT_ENABLED && 2266 rte_port->dev_info.vmdq_pool_base > 0) { 2267 printf("VMDQ_DCB multi-queue mode is nonsensical" 2268 " for port %d.", pid); 2269 return -1; 2270 } 2271 2272 /* Assume the ports in testpmd have the same dcb capability 2273 * and has the same number of rxq and txq in dcb mode 2274 */ 2275 if (dcb_mode == DCB_VT_ENABLED) { 2276 if (rte_port->dev_info.max_vfs > 0) { 2277 nb_rxq = rte_port->dev_info.nb_rx_queues; 2278 nb_txq = rte_port->dev_info.nb_tx_queues; 2279 } else { 2280 nb_rxq = rte_port->dev_info.max_rx_queues; 2281 nb_txq = rte_port->dev_info.max_tx_queues; 2282 } 2283 } else { 2284 /*if vt is disabled, use all pf queues */ 2285 if (rte_port->dev_info.vmdq_pool_base == 0) { 2286 nb_rxq = rte_port->dev_info.max_rx_queues; 2287 nb_txq = rte_port->dev_info.max_tx_queues; 2288 } else { 2289 nb_rxq = (queueid_t)num_tcs; 2290 nb_txq = (queueid_t)num_tcs; 2291 2292 } 2293 } 2294 rx_free_thresh = 64; 2295 2296 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2297 2298 rxtx_port_config(rte_port); 2299 /* VLAN filter */ 2300 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 2301 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2302 rx_vft_set(pid, vlan_tags[i], 1); 2303 2304 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2305 map_port_queue_stats_mapping_registers(pid, rte_port); 2306 2307 rte_port->dcb_flag = 1; 2308 2309 return 0; 2310 } 2311 2312 static void 2313 init_port(void) 2314 { 2315 /* Configuration of Ethernet ports. */ 2316 ports = rte_zmalloc("testpmd: ports", 2317 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2318 RTE_CACHE_LINE_SIZE); 2319 if (ports == NULL) { 2320 rte_exit(EXIT_FAILURE, 2321 "rte_zmalloc(%d struct rte_port) failed\n", 2322 RTE_MAX_ETHPORTS); 2323 } 2324 } 2325 2326 static void 2327 force_quit(void) 2328 { 2329 pmd_test_exit(); 2330 prompt_exit(); 2331 } 2332 2333 static void 2334 print_stats(void) 2335 { 2336 uint8_t i; 2337 const char clr[] = { 27, '[', '2', 'J', '\0' }; 2338 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2339 2340 /* Clear screen and move to top left */ 2341 printf("%s%s", clr, top_left); 2342 2343 printf("\nPort statistics ===================================="); 2344 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2345 nic_stats_display(fwd_ports_ids[i]); 2346 } 2347 2348 static void 2349 signal_handler(int signum) 2350 { 2351 if (signum == SIGINT || signum == SIGTERM) { 2352 printf("\nSignal %d received, preparing to exit...\n", 2353 signum); 2354 #ifdef RTE_LIBRTE_PDUMP 2355 /* uninitialize packet capture framework */ 2356 rte_pdump_uninit(); 2357 #endif 2358 #ifdef RTE_LIBRTE_LATENCY_STATS 2359 rte_latencystats_uninit(); 2360 #endif 2361 force_quit(); 2362 /* Set flag to indicate the force termination. */ 2363 f_quit = 1; 2364 /* exit with the expected status */ 2365 signal(signum, SIG_DFL); 2366 kill(getpid(), signum); 2367 } 2368 } 2369 2370 int 2371 main(int argc, char** argv) 2372 { 2373 int diag; 2374 portid_t port_id; 2375 2376 signal(SIGINT, signal_handler); 2377 signal(SIGTERM, signal_handler); 2378 2379 diag = rte_eal_init(argc, argv); 2380 if (diag < 0) 2381 rte_panic("Cannot init EAL\n"); 2382 2383 if (mlockall(MCL_CURRENT | MCL_FUTURE)) { 2384 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n", 2385 strerror(errno)); 2386 } 2387 2388 #ifdef RTE_LIBRTE_PDUMP 2389 /* initialize packet capture framework */ 2390 rte_pdump_init(NULL); 2391 #endif 2392 2393 nb_ports = (portid_t) rte_eth_dev_count(); 2394 if (nb_ports == 0) 2395 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2396 2397 /* allocate port structures, and init them */ 2398 init_port(); 2399 2400 set_def_fwd_config(); 2401 if (nb_lcores == 0) 2402 rte_panic("Empty set of forwarding logical cores - check the " 2403 "core mask supplied in the command parameters\n"); 2404 2405 /* Bitrate/latency stats disabled by default */ 2406 #ifdef RTE_LIBRTE_BITRATE 2407 bitrate_enabled = 0; 2408 #endif 2409 #ifdef RTE_LIBRTE_LATENCY_STATS 2410 latencystats_enabled = 0; 2411 #endif 2412 2413 argc -= diag; 2414 argv += diag; 2415 if (argc > 1) 2416 launch_args_parse(argc, argv); 2417 2418 if (tx_first && interactive) 2419 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 2420 "interactive mode.\n"); 2421 2422 if (tx_first && lsc_interrupt) { 2423 printf("Warning: lsc_interrupt needs to be off when " 2424 " using tx_first. Disabling.\n"); 2425 lsc_interrupt = 0; 2426 } 2427 2428 if (!nb_rxq && !nb_txq) 2429 printf("Warning: Either rx or tx queues should be non-zero\n"); 2430 2431 if (nb_rxq > 1 && nb_rxq > nb_txq) 2432 printf("Warning: nb_rxq=%d enables RSS configuration, " 2433 "but nb_txq=%d will prevent to fully test it.\n", 2434 nb_rxq, nb_txq); 2435 2436 init_config(); 2437 if (start_port(RTE_PORT_ALL) != 0) 2438 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2439 2440 /* set all ports to promiscuous mode by default */ 2441 RTE_ETH_FOREACH_DEV(port_id) 2442 rte_eth_promiscuous_enable(port_id); 2443 2444 /* Init metrics library */ 2445 rte_metrics_init(rte_socket_id()); 2446 2447 #ifdef RTE_LIBRTE_LATENCY_STATS 2448 if (latencystats_enabled != 0) { 2449 int ret = rte_latencystats_init(1, NULL); 2450 if (ret) 2451 printf("Warning: latencystats init()" 2452 " returned error %d\n", ret); 2453 printf("Latencystats running on lcore %d\n", 2454 latencystats_lcore_id); 2455 } 2456 #endif 2457 2458 /* Setup bitrate stats */ 2459 #ifdef RTE_LIBRTE_BITRATE 2460 if (bitrate_enabled != 0) { 2461 bitrate_data = rte_stats_bitrate_create(); 2462 if (bitrate_data == NULL) 2463 rte_exit(EXIT_FAILURE, 2464 "Could not allocate bitrate data.\n"); 2465 rte_stats_bitrate_reg(bitrate_data); 2466 } 2467 #endif 2468 2469 #ifdef RTE_LIBRTE_CMDLINE 2470 if (strlen(cmdline_filename) != 0) 2471 cmdline_read_from_file(cmdline_filename); 2472 2473 if (interactive == 1) { 2474 if (auto_start) { 2475 printf("Start automatic packet forwarding\n"); 2476 start_packet_forwarding(0); 2477 } 2478 prompt(); 2479 pmd_test_exit(); 2480 } else 2481 #endif 2482 { 2483 char c; 2484 int rc; 2485 2486 f_quit = 0; 2487 2488 printf("No commandline core given, start packet forwarding\n"); 2489 start_packet_forwarding(tx_first); 2490 if (stats_period != 0) { 2491 uint64_t prev_time = 0, cur_time, diff_time = 0; 2492 uint64_t timer_period; 2493 2494 /* Convert to number of cycles */ 2495 timer_period = stats_period * rte_get_timer_hz(); 2496 2497 while (f_quit == 0) { 2498 cur_time = rte_get_timer_cycles(); 2499 diff_time += cur_time - prev_time; 2500 2501 if (diff_time >= timer_period) { 2502 print_stats(); 2503 /* Reset the timer */ 2504 diff_time = 0; 2505 } 2506 /* Sleep to avoid unnecessary checks */ 2507 prev_time = cur_time; 2508 sleep(1); 2509 } 2510 } 2511 2512 printf("Press enter to exit\n"); 2513 rc = read(0, &c, 1); 2514 pmd_test_exit(); 2515 if (rc < 0) 2516 return 1; 2517 } 2518 2519 return 0; 2520 } 2521