/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated from the command line. Set a flag to exit the stats-period
 * loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
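
/*
 * Illustrative example (added comment, assuming the usual testpmd CLI):
 * the interactive command "set txpkts 64,128" makes the txonly engine
 * build each packet from two segments, i.e.
 *	tx_pkt_nb_segs = 2;
 *	tx_pkt_seg_lengths[0] = 64;
 *	tx_pkt_seg_lengths[1] = 128;
 * for a total tx_pkt_length of 192 bytes.
 */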

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
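
/*
 * Illustrative example (added comment): to also silence link status
 * change events, a configuration would clear the corresponding bit:
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC);
 */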

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp   = 0, /**< HW timestamp disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Hide zero values for xstats display (zero values are shown by default).
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket id has not been discovered yet.
 * If it is new, return positive value. If already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
439 */ 440 static void 441 set_default_fwd_lcores_config(void) 442 { 443 unsigned int i; 444 unsigned int nb_lc; 445 unsigned int sock_num; 446 447 nb_lc = 0; 448 for (i = 0; i < RTE_MAX_LCORE; i++) { 449 sock_num = rte_lcore_to_socket_id(i); 450 if (new_socket_id(sock_num)) { 451 if (num_sockets >= RTE_MAX_NUMA_NODES) { 452 rte_exit(EXIT_FAILURE, 453 "Total sockets greater than %u\n", 454 RTE_MAX_NUMA_NODES); 455 } 456 socket_ids[num_sockets++] = sock_num; 457 } 458 if (!rte_lcore_is_enabled(i)) 459 continue; 460 if (i == rte_get_master_lcore()) 461 continue; 462 fwd_lcores_cpuids[nb_lc++] = i; 463 } 464 nb_lcores = (lcoreid_t) nb_lc; 465 nb_cfg_lcores = nb_lcores; 466 nb_fwd_lcores = 1; 467 } 468 469 static void 470 set_def_peer_eth_addrs(void) 471 { 472 portid_t i; 473 474 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 475 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 476 peer_eth_addrs[i].addr_bytes[5] = i; 477 } 478 } 479 480 static void 481 set_default_fwd_ports_config(void) 482 { 483 portid_t pt_id; 484 int i = 0; 485 486 RTE_ETH_FOREACH_DEV(pt_id) 487 fwd_ports_ids[i++] = pt_id; 488 489 nb_cfg_ports = nb_ports; 490 nb_fwd_ports = nb_ports; 491 } 492 493 void 494 set_def_fwd_config(void) 495 { 496 set_default_fwd_lcores_config(); 497 set_def_peer_eth_addrs(); 498 set_default_fwd_ports_config(); 499 } 500 501 /* 502 * Configuration initialisation done once at init time. 503 */ 504 static void 505 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 506 unsigned int socket_id) 507 { 508 char pool_name[RTE_MEMPOOL_NAMESIZE]; 509 struct rte_mempool *rte_mp = NULL; 510 uint32_t mb_size; 511 512 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 513 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 514 515 RTE_LOG(INFO, USER1, 516 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 517 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 518 519 if (mp_anon != 0) { 520 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 521 mb_size, (unsigned) mb_mempool_cache, 522 sizeof(struct rte_pktmbuf_pool_private), 523 socket_id, 0); 524 if (rte_mp == NULL) 525 goto err; 526 527 if (rte_mempool_populate_anon(rte_mp) == 0) { 528 rte_mempool_free(rte_mp); 529 rte_mp = NULL; 530 goto err; 531 } 532 rte_pktmbuf_pool_init(rte_mp, NULL); 533 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 534 } else { 535 /* wrapper to rte_mempool_create() */ 536 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 537 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 538 } 539 540 err: 541 if (rte_mp == NULL) { 542 rte_exit(EXIT_FAILURE, 543 "Creation of mbuf pool for socket %u failed: %s\n", 544 socket_id, rte_strerror(rte_errno)); 545 } else if (verbose_level > 0) { 546 rte_mempool_dump(stdout, rte_mp); 547 } 548 } 549 550 /* 551 * Check given socket id is valid or not with NUMA mode, 552 * if valid, return 0, else return -1 553 */ 554 static int 555 check_socket_id(const unsigned int socket_id) 556 { 557 static int warning_once = 0; 558 559 if (new_socket_id(socket_id)) { 560 if (!warning_once && numa_support) 561 printf("Warning: NUMA should be configured manually by" 562 " using --port-numa-config and" 563 " --ring-numa-config parameters along with" 564 " --numa.\n"); 565 warning_once = 1; 566 return -1; 567 } 568 return 0; 569 } 570 571 static void 572 init_config(void) 573 { 574 portid_t pid; 575 struct rte_port *port; 576 struct rte_mempool *mbp; 577 unsigned int nb_mbuf_per_pool; 578 lcoreid_t lc_id; 579 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 580 struct rte_gro_param 

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
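
	/*
	 * Worked example (added comment, illustrative): the estimate above
	 * reserves enough mbufs for one RX ring and one TX ring at their
	 * maximum descriptor counts, one burst in flight, and one mempool
	 * cache per forwarding lcore, then scales by RTE_MAX_ETHPORTS so
	 * that every possible port can draw from the same per-socket pool.
	 */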

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
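
/*
 * Worked example (added comment, illustrative): with 2 ports, nb_rxq = 4
 * and nb_txq = 2, init_fwd_streams() allocates 2 * max(4, 2) = 8
 * forwarding streams, one per (port, queue) slot; fwd_config_setup()
 * later distributes them over the forwarding lcores.
 */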

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. So a timer is
				 * added to exit this loop after 1 second.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
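
/*
 * Illustrative note (added comment, assuming the usual testpmd CLI): the
 * interactive "start" command leads to start_packet_forwarding(0), while
 * "start tx_first [n]" passes a nonzero with_tx_first so that each port
 * first transmits n bursts to prime loopback topologies.
 */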

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
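
	/*
	 * Note (added comment, illustrative): the subtractions above turn
	 * cumulative NIC counters into per-run deltas, since port->stats
	 * holds the snapshot taken when forwarding was started.
	 */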
"%-"PRIu64"\n" 1349 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1350 "%-"PRIu64"\n", 1351 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1352 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1353 if (total_rx_nombuf > 0) 1354 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1355 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1356 "%s\n", 1357 acc_stats_border, acc_stats_border); 1358 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1359 if (total_recv > 0) 1360 printf("\n CPU cycles/packet=%u (total cycles=" 1361 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1362 (unsigned int)(fwd_cycles / total_recv), 1363 fwd_cycles, total_recv); 1364 #endif 1365 printf("\nDone.\n"); 1366 test_done = 1; 1367 } 1368 1369 void 1370 dev_set_link_up(portid_t pid) 1371 { 1372 if (rte_eth_dev_set_link_up(pid) < 0) 1373 printf("\nSet link up fail.\n"); 1374 } 1375 1376 void 1377 dev_set_link_down(portid_t pid) 1378 { 1379 if (rte_eth_dev_set_link_down(pid) < 0) 1380 printf("\nSet link down fail.\n"); 1381 } 1382 1383 static int 1384 all_ports_started(void) 1385 { 1386 portid_t pi; 1387 struct rte_port *port; 1388 1389 RTE_ETH_FOREACH_DEV(pi) { 1390 port = &ports[pi]; 1391 /* Check if there is a port which is not started */ 1392 if ((port->port_status != RTE_PORT_STARTED) && 1393 (port->slave_flag == 0)) 1394 return 0; 1395 } 1396 1397 /* No port is not started */ 1398 return 1; 1399 } 1400 1401 int 1402 all_ports_stopped(void) 1403 { 1404 portid_t pi; 1405 struct rte_port *port; 1406 1407 RTE_ETH_FOREACH_DEV(pi) { 1408 port = &ports[pi]; 1409 if ((port->port_status != RTE_PORT_STOPPED) && 1410 (port->slave_flag == 0)) 1411 return 0; 1412 } 1413 1414 return 1; 1415 } 1416 1417 int 1418 port_is_started(portid_t port_id) 1419 { 1420 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1421 return 0; 1422 1423 if (ports[port_id].port_status != RTE_PORT_STARTED) 1424 return 0; 1425 1426 return 1; 1427 } 1428 1429 static int 1430 port_is_closed(portid_t port_id) 1431 { 1432 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1433 return 0; 1434 1435 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1436 return 0; 1437 1438 return 1; 1439 } 1440 1441 int 1442 start_port(portid_t pid) 1443 { 1444 int diag, need_check_link_status = -1; 1445 portid_t pi; 1446 queueid_t qi; 1447 struct rte_port *port; 1448 struct ether_addr mac_addr; 1449 enum rte_eth_event_type event_type; 1450 1451 if (port_id_is_invalid(pid, ENABLED_WARN)) 1452 return 0; 1453 1454 if(dcb_config) 1455 dcb_test = 1; 1456 RTE_ETH_FOREACH_DEV(pi) { 1457 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1458 continue; 1459 1460 need_check_link_status = 0; 1461 port = &ports[pi]; 1462 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1463 RTE_PORT_HANDLING) == 0) { 1464 printf("Port %d is now not stopped\n", pi); 1465 continue; 1466 } 1467 1468 if (port->need_reconfig > 0) { 1469 port->need_reconfig = 0; 1470 1471 if (flow_isolate_all) { 1472 int ret = port_flow_isolate(pi, 1); 1473 if (ret) { 1474 printf("Failed to apply isolated" 1475 " mode on port %d\n", pi); 1476 return -1; 1477 } 1478 } 1479 1480 printf("Configuring Port %d (socket %u)\n", pi, 1481 port->socket_id); 1482 /* configure port */ 1483 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1484 &(port->dev_conf)); 1485 if (diag != 0) { 1486 if (rte_atomic16_cmpset(&(port->port_status), 1487 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1488 printf("Port %d can not be set back " 1489 "to stopped\n", pi); 1490 printf("Fail to configure port 

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
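
/*
 * Illustrative note (added comment): the expected port lifecycle is
 * stop_port() -> close_port() -> detach_port(); detach_port() below
 * refuses ports that are not closed, and pmd_test_exit() walks the same
 * stop/close sequence on shutdown.
 */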

void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

void
detach_port(portid_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
		return;
	}

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
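
/*
 * Illustrative note (added comment): callers of
 * check_all_ports_link_status() in this file pass RTE_PORT_ALL, whose set
 * bits match every port in the (port_mask & (1 << portid)) test below; a
 * single port could be polled with a mask of 1 << portid.
 */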
1873 ("full-duplex") : ("half-duplex\n")); 1874 else 1875 printf("Port %d Link Down\n", portid); 1876 continue; 1877 } 1878 /* clear all_ports_up flag if any link down */ 1879 if (link.link_status == ETH_LINK_DOWN) { 1880 all_ports_up = 0; 1881 break; 1882 } 1883 } 1884 /* after finally printing all link status, get out */ 1885 if (print_flag == 1) 1886 break; 1887 1888 if (all_ports_up == 0) { 1889 fflush(stdout); 1890 rte_delay_ms(CHECK_INTERVAL); 1891 } 1892 1893 /* set the print_flag if all ports up or timeout */ 1894 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1895 print_flag = 1; 1896 } 1897 1898 if (lsc_interrupt) 1899 break; 1900 } 1901 } 1902 1903 static void 1904 rmv_event_callback(void *arg) 1905 { 1906 struct rte_eth_dev *dev; 1907 portid_t port_id = (intptr_t)arg; 1908 1909 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1910 dev = &rte_eth_devices[port_id]; 1911 1912 stop_port(port_id); 1913 close_port(port_id); 1914 printf("removing device %s\n", dev->device->name); 1915 if (rte_eal_dev_detach(dev->device)) 1916 RTE_LOG(ERR, USER1, "Failed to detach device %s\n", 1917 dev->device->name); 1918 } 1919 1920 /* This function is used by the interrupt thread */ 1921 static int 1922 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 1923 void *ret_param) 1924 { 1925 static const char * const event_desc[] = { 1926 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 1927 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 1928 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 1929 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 1930 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 1931 [RTE_ETH_EVENT_MACSEC] = "MACsec", 1932 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 1933 [RTE_ETH_EVENT_MAX] = NULL, 1934 }; 1935 1936 RTE_SET_USED(param); 1937 RTE_SET_USED(ret_param); 1938 1939 if (type >= RTE_ETH_EVENT_MAX) { 1940 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 1941 port_id, __func__, type); 1942 fflush(stderr); 1943 } else if (event_print_mask & (UINT32_C(1) << type)) { 1944 printf("\nPort %" PRIu8 ": %s event\n", port_id, 1945 event_desc[type]); 1946 fflush(stdout); 1947 } 1948 1949 switch (type) { 1950 case RTE_ETH_EVENT_INTR_RMV: 1951 if (rte_eal_alarm_set(100000, 1952 rmv_event_callback, (void *)(intptr_t)port_id)) 1953 fprintf(stderr, "Could not set up deferred device removal\n"); 1954 break; 1955 default: 1956 break; 1957 } 1958 return 0; 1959 } 1960 1961 static int 1962 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1963 { 1964 uint16_t i; 1965 int diag; 1966 uint8_t mapping_found = 0; 1967 1968 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1969 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1970 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1971 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1972 tx_queue_stats_mappings[i].queue_id, 1973 tx_queue_stats_mappings[i].stats_counter_id); 1974 if (diag != 0) 1975 return diag; 1976 mapping_found = 1; 1977 } 1978 } 1979 if (mapping_found) 1980 port->tx_queue_stats_mapping_enabled = 1; 1981 return 0; 1982 } 1983 1984 static int 1985 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1986 { 1987 uint16_t i; 1988 int diag; 1989 uint8_t mapping_found = 0; 1990 1991 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1992 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1993 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1994 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1995 rx_queue_stats_mappings[i].queue_id, 1996 
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
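/*
 * Illustrative example (added annotation): the RTE_PMD_PARAM_UNSET checks
 * above keep each PMD's default ring thresholds unless the corresponding
 * testpmd option was given. A typical override might look like:
 *
 *   --rxfreet=32 --txfreet=32 --txrst=32
 *
 * which sets rx_free_thresh, tx_free_thresh and tx_rs_thresh while leaving
 * the prefetch/host/write-back thresholds at their driver defaults.
 */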
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}

void
set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void
clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t
port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
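/*
 * Worked example (added annotation): with num_tcs == ETH_4_TCS the code
 * below selects ETH_32_POOLS, so all 32 entries of vlan_tags[] are used:
 * VLAN vlan_tags[i] maps to pool bit (i % 32), and user priority i maps to
 * traffic class (i % 4), i.e. priorities 0..7 -> TCs 0,1,2,3,0,1,2,3.
 * With ETH_8_TCS only 16 pools are configured and priorities map 1:1.
 */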
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
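/*
 * Usage note (editorial sketch): init_port_dcb_config() below is reached
 * from the interactive command line, e.g.:
 *
 *   testpmd> port stop 0
 *   testpmd> port config 0 dcb vt off 4 pfc on
 *   testpmd> port start 0
 *
 * The port must be stopped first; the exact option spelling follows the
 * testpmd user guide for this release.
 */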
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be set up.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };	/* ANSI "ESC[2J": clear screen */
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };	/* ANSI "ESC[1;1H": cursor home */

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
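/*
 * Added annotation: signal_handler() below follows the common "clean up,
 * then re-raise" idiom: after the shutdown work it restores SIG_DFL and
 * re-sends the signal to itself, so the process reports the conventional
 * killed-by-signal exit status to its parent instead of exiting normally.
 */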
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate forced termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

int
main(int argc, char **argv)
{
	int diag;
	portid_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
			"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
			"but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Failed to start ports\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init() "
				"returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No interactive command line; starting packet forwarding\n");
		start_packet_forwarding(tx_first);
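		/*
		 * Added annotation: the block below converts the requested
		 * period from seconds to TSC ticks. For example, with
		 * --stats-period=5 on a machine whose rte_get_timer_hz()
		 * reports 2.0 GHz, timer_period becomes 10,000,000,000
		 * cycles, checked roughly once per second by the sleep(1)
		 * poll.
		 */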
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
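/*
 * Example invocation (editorial sketch; the EAL core/memory options are
 * system-dependent and the flags shown are illustrative):
 *
 *   interactive:      ./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *   non-interactive:  ./testpmd -l 0-3 -n 4 -- --stats-period=5
 *
 * In the second form the stats loop in main() prints port statistics every
 * 5 seconds until SIGINT/SIGTERM sets f_quit.
 */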