/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store specified sockets on which memory pool to be used by ports
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which RX ring to be used by ports
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which TX ring to be used by ports
 * is allocated.
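 * (Like port_numa and rxring_numa above, these arrays are normally filled
 * from the --port-numa-config / --ring-numa-config command-line options;
 * see the warning printed by check_socket_id() further below.)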
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports; /**< For all probed ethernet ports. */
portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from outside. Set a flag to exit the stats-period loop once
 * SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
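 * (The defaults below, one RX and one TX queue per port, can be overridden
 * from the command line, e.g. with the --rxq and --txq options.)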
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			(UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			(UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			(UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			(UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			(UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
		struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
		enum rte_eth_event_type type,
		void *param, void *ret_param);
static void eth_dev_event_callback(const char *device_name,
		enum rte_dev_event_type type,
		void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket id has not been discovered yet.
 * If the socket is new, return a positive value. If it was already
 * discovered, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
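 * The helpers below walk the enabled lcores and the probed ports to build
 * the default forwarding configuration: every enabled lcore except the
 * master becomes a forwarding lcore, and every probed port a forwarding port.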
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					"Total sockets greater than %u\n",
					RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					"Total sockets greater than %u\n",
					RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
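	 * (This 128MB figure is a deliberate overestimate of the mempool
	 * header/metadata overhead; it is added on top of the object memory
	 * computed below, so the estimate errs on the side of too much.)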
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static inline uint32_t
bsf64(uint64_t v)
{
	return (uint32_t)__builtin_ctzll(v);
}

static inline uint32_t
log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	return bsf64(v);
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

/*
 * Configuration initialisation done once at init time.
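 * mbuf_pool_create() below builds an mbuf pool for a given socket,
 * honouring mp_alloc_type (native DPDK memory, anonymous memory, or the
 * external "xmem" heap set up above).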
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check input rxq is valid or not.
 * If input rxq is not greater than any of maximum number
 * of RX queues of all ports, it is valid.
 * if valid, return 0, else return -1
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
			"than max_rx_queues (%u) of port %u\n",
			rxq,
			allowed_max_rxq,
			pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check input txq is valid or not.
 * If input txq is not greater than any of maximum number
 * of TX queues of all ports, it is valid.
 * if valid, return 0, else return -1
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
			"than max_tx_queues (%u) of port %u\n",
			txq,
			allowed_max_txq,
			pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/*
	 * Configuration of packet forwarding streams.
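	 * (init_fwd_streams() sizes the stream array as
	 * nb_ports * max(nb_rxq, nb_txq); each stream binds one RX queue to
	 * one TX queue.)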
	 */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
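	 * (Only the two most frequent burst sizes are reported individually;
	 * anything else is folded into the "others" percentage printed below.)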
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t)
		       (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u Rx- bad outer L4 checksum: %-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while loop
				 * if rte_eth_rx_burst() always returns nonzero
				 * packets. So a timer is added to exit this loop
				 * after 1sec timer expiry.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
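 * Ports that have become invalid (e.g. detached devices) are compacted out
 * of fwd_ports_ids; new_pid, when it is a valid port id, is appended at the
 * end.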
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
				fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes =
			0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if
		    (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
2111 "stopped\n", pi); 2112 continue; 2113 } 2114 2115 if (rte_atomic16_cmpset(&(port->port_status), 2116 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 2117 printf("Port %d can not be set into started\n", pi); 2118 2119 rte_eth_macaddr_get(pi, &mac_addr); 2120 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 2121 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 2122 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 2123 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 2124 2125 /* at least one port started, need checking link status */ 2126 need_check_link_status = 1; 2127 } 2128 2129 for (event_type = RTE_ETH_EVENT_UNKNOWN; 2130 event_type < RTE_ETH_EVENT_MAX; 2131 event_type++) { 2132 diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 2133 event_type, 2134 eth_event_callback, 2135 NULL); 2136 if (diag) { 2137 printf("Failed to setup even callback for event %d\n", 2138 event_type); 2139 return -1; 2140 } 2141 } 2142 2143 if (need_check_link_status == 1 && !no_link_check) 2144 check_all_ports_link_status(RTE_PORT_ALL); 2145 else if (need_check_link_status == 0) 2146 printf("Please stop the ports first\n"); 2147 2148 printf("Done\n"); 2149 return 0; 2150 } 2151 2152 void 2153 stop_port(portid_t pid) 2154 { 2155 portid_t pi; 2156 struct rte_port *port; 2157 int need_check_link_status = 0; 2158 2159 if (dcb_test) { 2160 dcb_test = 0; 2161 dcb_config = 0; 2162 } 2163 2164 if (port_id_is_invalid(pid, ENABLED_WARN)) 2165 return; 2166 2167 printf("Stopping ports...\n"); 2168 2169 RTE_ETH_FOREACH_DEV(pi) { 2170 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2171 continue; 2172 2173 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2174 printf("Please remove port %d from forwarding configuration.\n", pi); 2175 continue; 2176 } 2177 2178 if (port_is_bonding_slave(pi)) { 2179 printf("Please remove port %d from bonded device.\n", pi); 2180 continue; 2181 } 2182 2183 port = &ports[pi]; 2184 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 2185 RTE_PORT_HANDLING) == 0) 2186 continue; 2187 2188 rte_eth_dev_stop(pi); 2189 2190 if (rte_atomic16_cmpset(&(port->port_status), 2191 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2192 printf("Port %d can not be set into stopped\n", pi); 2193 need_check_link_status = 1; 2194 } 2195 if (need_check_link_status && !no_link_check) 2196 check_all_ports_link_status(RTE_PORT_ALL); 2197 2198 printf("Done\n"); 2199 } 2200 2201 void 2202 close_port(portid_t pid) 2203 { 2204 portid_t pi; 2205 struct rte_port *port; 2206 2207 if (port_id_is_invalid(pid, ENABLED_WARN)) 2208 return; 2209 2210 printf("Closing ports...\n"); 2211 2212 RTE_ETH_FOREACH_DEV(pi) { 2213 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2214 continue; 2215 2216 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2217 printf("Please remove port %d from forwarding configuration.\n", pi); 2218 continue; 2219 } 2220 2221 if (port_is_bonding_slave(pi)) { 2222 printf("Please remove port %d from bonded device.\n", pi); 2223 continue; 2224 } 2225 2226 port = &ports[pi]; 2227 if (rte_atomic16_cmpset(&(port->port_status), 2228 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 2229 printf("Port %d is already closed\n", pi); 2230 continue; 2231 } 2232 2233 if (rte_atomic16_cmpset(&(port->port_status), 2234 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 2235 printf("Port %d is now not stopped\n", pi); 2236 continue; 2237 } 2238 2239 if (port->flow_list) 2240 port_flow_flush(pi); 2241 rte_eth_dev_close(pi); 2242 2243 if (rte_atomic16_cmpset(&(port->port_status), 2244 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 2245 
printf("Port %d cannot be set to closed\n", pi); 2246 } 2247 2248 printf("Done\n"); 2249 } 2250 2251 void 2252 reset_port(portid_t pid) 2253 { 2254 int diag; 2255 portid_t pi; 2256 struct rte_port *port; 2257 2258 if (port_id_is_invalid(pid, ENABLED_WARN)) 2259 return; 2260 2261 printf("Resetting ports...\n"); 2262 2263 RTE_ETH_FOREACH_DEV(pi) { 2264 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2265 continue; 2266 2267 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2268 printf("Please remove port %d from forwarding " 2269 "configuration.\n", pi); 2270 continue; 2271 } 2272 2273 if (port_is_bonding_slave(pi)) { 2274 printf("Please remove port %d from bonded device.\n", 2275 pi); 2276 continue; 2277 } 2278 2279 diag = rte_eth_dev_reset(pi); 2280 if (diag == 0) { 2281 port = &ports[pi]; 2282 port->need_reconfig = 1; 2283 port->need_reconfig_queues = 1; 2284 } else { 2285 printf("Failed to reset port %d. diag=%d\n", pi, diag); 2286 } 2287 } 2288 2289 printf("Done\n"); 2290 } 2291 2292 void 2293 attach_port(char *identifier) 2294 { 2295 portid_t pi = 0; 2296 unsigned int socket_id; 2297 2298 printf("Attaching a new port...\n"); 2299 2300 if (identifier == NULL) { 2301 printf("Invalid parameters are specified\n"); 2302 return; 2303 } 2304 2305 if (rte_eth_dev_attach(identifier, &pi)) 2306 return; 2307 2308 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 2309 /* if socket_id is invalid, set to the first available socket. */ 2310 if (check_socket_id(socket_id) < 0) 2311 socket_id = socket_ids[0]; 2312 reconfig(pi, socket_id); 2313 rte_eth_promiscuous_enable(pi); 2314 2315 ports_ids[nb_ports] = pi; 2316 nb_ports = rte_eth_dev_count_avail(); 2317 2318 ports[pi].port_status = RTE_PORT_STOPPED; 2319 2320 update_fwd_ports(pi); 2321 2322 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2323 printf("Done\n"); 2324 } 2325 2326 void 2327 detach_port(portid_t port_id) 2328 { 2329 char name[RTE_ETH_NAME_MAX_LEN]; 2330 uint16_t i; 2331 2332 printf("Detaching a port...\n"); 2333 2334 if (!port_is_closed(port_id)) { 2335 printf("Please close port first\n"); 2336 return; 2337 } 2338 2339 if (ports[port_id].flow_list) 2340 port_flow_flush(port_id); 2341 2342 if (rte_eth_dev_detach(port_id, name)) { 2343 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); 2344 return; 2345 } 2346 2347 for (i = 0; i < nb_ports; i++) { 2348 if (ports_ids[i] == port_id) { 2349 ports_ids[i] = ports_ids[nb_ports-1]; 2350 ports_ids[nb_ports-1] = 0; 2351 break; 2352 } 2353 } 2354 nb_ports = rte_eth_dev_count_avail(); 2355 2356 update_fwd_ports(RTE_MAX_ETHPORTS); 2357 2358 printf("Port %u is detached. Now total ports is %d\n", 2359 port_id, nb_ports); 2360 printf("Done\n"); 2361 return; 2362 } 2363 2364 void 2365 pmd_test_exit(void) 2366 { 2367 struct rte_device *device; 2368 portid_t pt_id; 2369 int ret; 2370 2371 if (test_done == 0) 2372 stop_packet_forwarding(); 2373 2374 if (ports != NULL) { 2375 no_link_check = 1; 2376 RTE_ETH_FOREACH_DEV(pt_id) { 2377 printf("\nShutting down port %d...\n", pt_id); 2378 fflush(stdout); 2379 stop_port(pt_id); 2380 close_port(pt_id); 2381 2382 /* 2383 * This is a workaround to fix a virtio-user issue that 2384 * requires to call clean-up routine to remove existing 2385 * socket. 2386 * This workaround valid only for testpmd, needs a fix 2387 * valid for all applications. 
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires calling the clean-up routine to remove the
			 * existing socket.
			 * The workaround is valid only for testpmd; a fix
			 * valid for all applications is still needed.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.\n");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			eth_dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}

	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
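	/*
	 * Poll link status every CHECK_INTERVAL ms for at most MAX_CHECK_TIME
	 * iterations (~9 s). Per-port status is printed once, either when all
	 * ports report link up or when the timeout is reached. When LSC
	 * interrupts are enabled a single pass is enough, since later changes
	 * are reported asynchronously through the LSC event callback.
	 */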
2457 ("full-duplex") : ("half-duplex\n")); 2458 else 2459 printf("Port %d Link Down\n", portid); 2460 continue; 2461 } 2462 /* clear all_ports_up flag if any link down */ 2463 if (link.link_status == ETH_LINK_DOWN) { 2464 all_ports_up = 0; 2465 break; 2466 } 2467 } 2468 /* after finally printing all link status, get out */ 2469 if (print_flag == 1) 2470 break; 2471 2472 if (all_ports_up == 0) { 2473 fflush(stdout); 2474 rte_delay_ms(CHECK_INTERVAL); 2475 } 2476 2477 /* set the print_flag if all ports up or timeout */ 2478 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2479 print_flag = 1; 2480 } 2481 2482 if (lsc_interrupt) 2483 break; 2484 } 2485 } 2486 2487 static void 2488 rmv_event_callback(void *arg) 2489 { 2490 int need_to_start = 0; 2491 int org_no_link_check = no_link_check; 2492 portid_t port_id = (intptr_t)arg; 2493 2494 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2495 2496 if (!test_done && port_is_forwarding(port_id)) { 2497 need_to_start = 1; 2498 stop_packet_forwarding(); 2499 } 2500 no_link_check = 1; 2501 stop_port(port_id); 2502 no_link_check = org_no_link_check; 2503 close_port(port_id); 2504 detach_port(port_id); 2505 if (need_to_start) 2506 start_packet_forwarding(0); 2507 } 2508 2509 /* This function is used by the interrupt thread */ 2510 static int 2511 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2512 void *ret_param) 2513 { 2514 static const char * const event_desc[] = { 2515 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 2516 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 2517 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 2518 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 2519 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2520 [RTE_ETH_EVENT_IPSEC] = "IPsec", 2521 [RTE_ETH_EVENT_MACSEC] = "MACsec", 2522 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 2523 [RTE_ETH_EVENT_NEW] = "device probed", 2524 [RTE_ETH_EVENT_DESTROY] = "device released", 2525 [RTE_ETH_EVENT_MAX] = NULL, 2526 }; 2527 2528 RTE_SET_USED(param); 2529 RTE_SET_USED(ret_param); 2530 2531 if (type >= RTE_ETH_EVENT_MAX) { 2532 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 2533 port_id, __func__, type); 2534 fflush(stderr); 2535 } else if (event_print_mask & (UINT32_C(1) << type)) { 2536 printf("\nPort %" PRIu16 ": %s event\n", port_id, 2537 event_desc[type]); 2538 fflush(stdout); 2539 } 2540 2541 if (port_id_is_invalid(port_id, DISABLED_WARN)) 2542 return 0; 2543 2544 switch (type) { 2545 case RTE_ETH_EVENT_INTR_RMV: 2546 if (rte_eal_alarm_set(100000, 2547 rmv_event_callback, (void *)(intptr_t)port_id)) 2548 fprintf(stderr, "Could not set up deferred device removal\n"); 2549 break; 2550 default: 2551 break; 2552 } 2553 return 0; 2554 } 2555 2556 /* This function is used by the interrupt thread */ 2557 static void 2558 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type, 2559 __rte_unused void *arg) 2560 { 2561 uint16_t port_id; 2562 int ret; 2563 2564 if (type >= RTE_DEV_EVENT_MAX) { 2565 fprintf(stderr, "%s called upon invalid event %d\n", 2566 __func__, type); 2567 fflush(stderr); 2568 } 2569 2570 switch (type) { 2571 case RTE_DEV_EVENT_REMOVE: 2572 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2573 device_name); 2574 ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 2575 if (ret) { 2576 RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 2577 device_name); 2578 return; 2579 } 2580 rmv_event_callback((void *)(intptr_t)port_id); 2581 break; 2582 case RTE_DEV_EVENT_ADD: 2583 RTE_LOG(ERR, EAL, "The device: %s has been 
added!\n", 2584 device_name); 2585 /* TODO: After finish kernel driver binding, 2586 * begin to attach port. 2587 */ 2588 break; 2589 default: 2590 break; 2591 } 2592 } 2593 2594 static int 2595 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2596 { 2597 uint16_t i; 2598 int diag; 2599 uint8_t mapping_found = 0; 2600 2601 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2602 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2603 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2604 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2605 tx_queue_stats_mappings[i].queue_id, 2606 tx_queue_stats_mappings[i].stats_counter_id); 2607 if (diag != 0) 2608 return diag; 2609 mapping_found = 1; 2610 } 2611 } 2612 if (mapping_found) 2613 port->tx_queue_stats_mapping_enabled = 1; 2614 return 0; 2615 } 2616 2617 static int 2618 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2619 { 2620 uint16_t i; 2621 int diag; 2622 uint8_t mapping_found = 0; 2623 2624 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2625 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2626 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2627 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2628 rx_queue_stats_mappings[i].queue_id, 2629 rx_queue_stats_mappings[i].stats_counter_id); 2630 if (diag != 0) 2631 return diag; 2632 mapping_found = 1; 2633 } 2634 } 2635 if (mapping_found) 2636 port->rx_queue_stats_mapping_enabled = 1; 2637 return 0; 2638 } 2639 2640 static void 2641 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2642 { 2643 int diag = 0; 2644 2645 diag = set_tx_queue_stats_mapping_registers(pi, port); 2646 if (diag != 0) { 2647 if (diag == -ENOTSUP) { 2648 port->tx_queue_stats_mapping_enabled = 0; 2649 printf("TX queue stats mapping not supported port id=%d\n", pi); 2650 } 2651 else 2652 rte_exit(EXIT_FAILURE, 2653 "set_tx_queue_stats_mapping_registers " 2654 "failed for port id=%d diag=%d\n", 2655 pi, diag); 2656 } 2657 2658 diag = set_rx_queue_stats_mapping_registers(pi, port); 2659 if (diag != 0) { 2660 if (diag == -ENOTSUP) { 2661 port->rx_queue_stats_mapping_enabled = 0; 2662 printf("RX queue stats mapping not supported port id=%d\n", pi); 2663 } 2664 else 2665 rte_exit(EXIT_FAILURE, 2666 "set_rx_queue_stats_mapping_registers " 2667 "failed for port id=%d diag=%d\n", 2668 pi, diag); 2669 } 2670 } 2671 2672 static void 2673 rxtx_port_config(struct rte_port *port) 2674 { 2675 uint16_t qid; 2676 2677 for (qid = 0; qid < nb_rxq; qid++) { 2678 port->rx_conf[qid] = port->dev_info.default_rxconf; 2679 2680 /* Check if any Rx parameters have been passed */ 2681 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2682 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 2683 2684 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2685 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 2686 2687 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2688 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 2689 2690 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2691 port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 2692 2693 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2694 port->rx_conf[qid].rx_drop_en = rx_drop_en; 2695 2696 port->nb_rx_desc[qid] = nb_rxd; 2697 } 2698 2699 for (qid = 0; qid < nb_txq; qid++) { 2700 port->tx_conf[qid] = port->dev_info.default_txconf; 2701 2702 /* Check if any Tx parameters have been passed */ 2703 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2704 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2705 2706 if (tx_hthresh != 
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;

	for (qid = 0; qid < nb_rxq; qid++) {
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		port->tx_conf[qid] = port->dev_info.default_txconf;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
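/*
 * VLAN tags used when building the VMDQ+DCB (DCB_VT_ENABLED) configuration
 * below: each tag is mapped to one VMDQ pool and the user priorities are
 * spread round-robin over the configured traffic classes. For example, with
 * ETH_4_TCS the setup uses 32 pools and pool_map[i] effectively becomes
 * { .vlan_id = vlan_tags[i], .pools = 1 << (i % 32) }.
 */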
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
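/*
 * Reconfigure a stopped port for DCB testing (typically reached from the
 * "port config ... dcb ..." command): build the DCB/VMDQ configuration via
 * get_eth_dcb_conf(), adjust the number of Rx/Tx queues to the chosen
 * layout and leave the port ready to be started again with start_port().
 */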
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device . */
	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
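/*
 * SIGINT/SIGTERM handler: shuts the application down cleanly (packet
 * capture, latency stats, ports) and then re-raises the signal with the
 * default handler so the process exit status still reflects the signal
 * that was received.
 */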
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
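	/*
	 * Basic sanity checks on the combination of parsed options before the
	 * forwarding configuration is built: --tx-first conflicts with
	 * interactive mode, implies disabling LSC interrupts, and at least
	 * one of the Rx/Tx queue counts should be non-zero.
	 */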
Disabling.\n"); 3094 lsc_interrupt = 0; 3095 } 3096 3097 if (!nb_rxq && !nb_txq) 3098 printf("Warning: Either rx or tx queues should be non-zero\n"); 3099 3100 if (nb_rxq > 1 && nb_rxq > nb_txq) 3101 printf("Warning: nb_rxq=%d enables RSS configuration, " 3102 "but nb_txq=%d will prevent to fully test it.\n", 3103 nb_rxq, nb_txq); 3104 3105 init_config(); 3106 3107 if (hot_plug) { 3108 ret = rte_dev_hotplug_handle_enable(); 3109 if (ret) { 3110 RTE_LOG(ERR, EAL, 3111 "fail to enable hotplug handling."); 3112 return -1; 3113 } 3114 3115 ret = rte_dev_event_monitor_start(); 3116 if (ret) { 3117 RTE_LOG(ERR, EAL, 3118 "fail to start device event monitoring."); 3119 return -1; 3120 } 3121 3122 ret = rte_dev_event_callback_register(NULL, 3123 eth_dev_event_callback, NULL); 3124 if (ret) { 3125 RTE_LOG(ERR, EAL, 3126 "fail to register device event callback\n"); 3127 return -1; 3128 } 3129 } 3130 3131 if (start_port(RTE_PORT_ALL) != 0) 3132 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 3133 3134 /* set all ports to promiscuous mode by default */ 3135 RTE_ETH_FOREACH_DEV(port_id) 3136 rte_eth_promiscuous_enable(port_id); 3137 3138 /* Init metrics library */ 3139 rte_metrics_init(rte_socket_id()); 3140 3141 #ifdef RTE_LIBRTE_LATENCY_STATS 3142 if (latencystats_enabled != 0) { 3143 int ret = rte_latencystats_init(1, NULL); 3144 if (ret) 3145 printf("Warning: latencystats init()" 3146 " returned error %d\n", ret); 3147 printf("Latencystats running on lcore %d\n", 3148 latencystats_lcore_id); 3149 } 3150 #endif 3151 3152 /* Setup bitrate stats */ 3153 #ifdef RTE_LIBRTE_BITRATE 3154 if (bitrate_enabled != 0) { 3155 bitrate_data = rte_stats_bitrate_create(); 3156 if (bitrate_data == NULL) 3157 rte_exit(EXIT_FAILURE, 3158 "Could not allocate bitrate data.\n"); 3159 rte_stats_bitrate_reg(bitrate_data); 3160 } 3161 #endif 3162 3163 #ifdef RTE_LIBRTE_CMDLINE 3164 if (strlen(cmdline_filename) != 0) 3165 cmdline_read_from_file(cmdline_filename); 3166 3167 if (interactive == 1) { 3168 if (auto_start) { 3169 printf("Start automatic packet forwarding\n"); 3170 start_packet_forwarding(0); 3171 } 3172 prompt(); 3173 pmd_test_exit(); 3174 } else 3175 #endif 3176 { 3177 char c; 3178 int rc; 3179 3180 f_quit = 0; 3181 3182 printf("No commandline core given, start packet forwarding\n"); 3183 start_packet_forwarding(tx_first); 3184 if (stats_period != 0) { 3185 uint64_t prev_time = 0, cur_time, diff_time = 0; 3186 uint64_t timer_period; 3187 3188 /* Convert to number of cycles */ 3189 timer_period = stats_period * rte_get_timer_hz(); 3190 3191 while (f_quit == 0) { 3192 cur_time = rte_get_timer_cycles(); 3193 diff_time += cur_time - prev_time; 3194 3195 if (diff_time >= timer_period) { 3196 print_stats(); 3197 /* Reset the timer */ 3198 diff_time = 0; 3199 } 3200 /* Sleep to avoid unnecessary checks */ 3201 prev_time = cur_time; 3202 sleep(1); 3203 } 3204 } 3205 3206 printf("Press enter to exit\n"); 3207 rc = read(0, &c, 1); 3208 pmd_test_exit(); 3209 if (rc < 0) 3210 return 1; 3211 } 3212 3213 return 0; 3214 } 3215