1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <stdarg.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <signal.h> 9 #include <string.h> 10 #include <time.h> 11 #include <fcntl.h> 12 #include <sys/mman.h> 13 #include <sys/types.h> 14 #include <errno.h> 15 #include <stdbool.h> 16 17 #include <sys/queue.h> 18 #include <sys/stat.h> 19 20 #include <stdint.h> 21 #include <unistd.h> 22 #include <inttypes.h> 23 24 #include <rte_common.h> 25 #include <rte_errno.h> 26 #include <rte_byteorder.h> 27 #include <rte_log.h> 28 #include <rte_debug.h> 29 #include <rte_cycles.h> 30 #include <rte_malloc_heap.h> 31 #include <rte_memory.h> 32 #include <rte_memcpy.h> 33 #include <rte_launch.h> 34 #include <rte_eal.h> 35 #include <rte_alarm.h> 36 #include <rte_per_lcore.h> 37 #include <rte_lcore.h> 38 #include <rte_atomic.h> 39 #include <rte_branch_prediction.h> 40 #include <rte_mempool.h> 41 #include <rte_malloc.h> 42 #include <rte_mbuf.h> 43 #include <rte_mbuf_pool_ops.h> 44 #include <rte_interrupts.h> 45 #include <rte_pci.h> 46 #include <rte_ether.h> 47 #include <rte_ethdev.h> 48 #include <rte_dev.h> 49 #include <rte_string_fns.h> 50 #ifdef RTE_LIBRTE_IXGBE_PMD 51 #include <rte_pmd_ixgbe.h> 52 #endif 53 #ifdef RTE_LIBRTE_PDUMP 54 #include <rte_pdump.h> 55 #endif 56 #include <rte_flow.h> 57 #include <rte_metrics.h> 58 #ifdef RTE_LIBRTE_BITRATE 59 #include <rte_bitrate.h> 60 #endif 61 #ifdef RTE_LIBRTE_LATENCY_STATS 62 #include <rte_latencystats.h> 63 #endif 64 65 #include "testpmd.h" 66 67 #ifndef MAP_HUGETLB 68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ 69 #define HUGE_FLAG (0x40000) 70 #else 71 #define HUGE_FLAG MAP_HUGETLB 72 #endif 73 74 #ifndef MAP_HUGE_SHIFT 75 /* older kernels (or FreeBSD) will not have this define */ 76 #define HUGE_SHIFT (26) 77 #else 78 #define HUGE_SHIFT MAP_HUGE_SHIFT 79 #endif 80 81 #define EXTMEM_HEAP_NAME "extmem" 82 83 uint16_t verbose_level = 0; /**< Silent by default. */ 84 int testpmd_logtype; /**< Log type for testpmd logs */ 85 86 /* use master core for command line ? */ 87 uint8_t interactive = 0; 88 uint8_t auto_start = 0; 89 uint8_t tx_first; 90 char cmdline_filename[PATH_MAX] = {0}; 91 92 /* 93 * NUMA support configuration. 94 * When set, the NUMA support attempts to dispatch the allocation of the 95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 96 * probed ports among the CPU sockets 0 and 1. 97 * Otherwise, all memory is allocated from CPU socket 0. 98 */ 99 uint8_t numa_support = 1; /**< numa enabled by default */ 100 101 /* 102 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 103 * not configured. 104 */ 105 uint8_t socket_num = UMA_NO_CONFIG; 106 107 /* 108 * Select mempool allocation type: 109 * - native: use regular DPDK memory 110 * - anon: use regular DPDK memory to create mempool, but populate using 111 * anonymous memory (may not be IOVA-contiguous) 112 * - xmem: use externally allocated hugepage memory 113 */ 114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE; 115 116 /* 117 * Store specified sockets on which memory pool to be used by ports 118 * is allocated. 119 */ 120 uint8_t port_numa[RTE_MAX_ETHPORTS]; 121 122 /* 123 * Store specified sockets on which RX ring to be used by ports 124 * is allocated. 125 */ 126 uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 127 128 /* 129 * Store specified sockets on which TX ring to be used by ports 130 * is allocated. 
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated directly. Set this flag to exit the stats-period loop after
 * SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1.
*/ 423 424 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 425 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 426 427 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 428 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 429 430 uint16_t nb_tx_queue_stats_mappings = 0; 431 uint16_t nb_rx_queue_stats_mappings = 0; 432 433 /* 434 * Display zero values by default for xstats 435 */ 436 uint8_t xstats_hide_zero; 437 438 unsigned int num_sockets = 0; 439 unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 440 441 #ifdef RTE_LIBRTE_BITRATE 442 /* Bitrate statistics */ 443 struct rte_stats_bitrates *bitrate_data; 444 lcoreid_t bitrate_lcore_id; 445 uint8_t bitrate_enabled; 446 #endif 447 448 struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 449 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 450 451 struct vxlan_encap_conf vxlan_encap_conf = { 452 .select_ipv4 = 1, 453 .select_vlan = 0, 454 .vni = "\x00\x00\x00", 455 .udp_src = 0, 456 .udp_dst = RTE_BE16(4789), 457 .ipv4_src = IPv4(127, 0, 0, 1), 458 .ipv4_dst = IPv4(255, 255, 255, 255), 459 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 460 "\x00\x00\x00\x00\x00\x00\x00\x01", 461 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 462 "\x00\x00\x00\x00\x00\x00\x11\x11", 463 .vlan_tci = 0, 464 .eth_src = "\x00\x00\x00\x00\x00\x00", 465 .eth_dst = "\xff\xff\xff\xff\xff\xff", 466 }; 467 468 struct nvgre_encap_conf nvgre_encap_conf = { 469 .select_ipv4 = 1, 470 .select_vlan = 0, 471 .tni = "\x00\x00\x00", 472 .ipv4_src = IPv4(127, 0, 0, 1), 473 .ipv4_dst = IPv4(255, 255, 255, 255), 474 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 475 "\x00\x00\x00\x00\x00\x00\x00\x01", 476 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 477 "\x00\x00\x00\x00\x00\x00\x11\x11", 478 .vlan_tci = 0, 479 .eth_src = "\x00\x00\x00\x00\x00\x00", 480 .eth_dst = "\xff\xff\xff\xff\xff\xff", 481 }; 482 483 /* Forward function declarations */ 484 static void map_port_queue_stats_mapping_registers(portid_t pi, 485 struct rte_port *port); 486 static void check_all_ports_link_status(uint32_t port_mask); 487 static int eth_event_callback(portid_t port_id, 488 enum rte_eth_event_type type, 489 void *param, void *ret_param); 490 static void eth_dev_event_callback(const char *device_name, 491 enum rte_dev_event_type type, 492 void *param); 493 494 /* 495 * Check if all the ports are started. 496 * If yes, return positive value. If not, return zero. 497 */ 498 static int all_ports_started(void); 499 500 struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 501 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 502 503 /* 504 * Helper function to check if socket is already discovered. 505 * If yes, return positive value. If not, return zero. 506 */ 507 int 508 new_socket_id(unsigned int socket_id) 509 { 510 unsigned int i; 511 512 for (i = 0; i < num_sockets; i++) { 513 if (socket_ids[i] == socket_id) 514 return 0; 515 } 516 return 1; 517 } 518 519 /* 520 * Setup default configuration. 
521 */ 522 static void 523 set_default_fwd_lcores_config(void) 524 { 525 unsigned int i; 526 unsigned int nb_lc; 527 unsigned int sock_num; 528 529 nb_lc = 0; 530 for (i = 0; i < RTE_MAX_LCORE; i++) { 531 if (!rte_lcore_is_enabled(i)) 532 continue; 533 sock_num = rte_lcore_to_socket_id(i); 534 if (new_socket_id(sock_num)) { 535 if (num_sockets >= RTE_MAX_NUMA_NODES) { 536 rte_exit(EXIT_FAILURE, 537 "Total sockets greater than %u\n", 538 RTE_MAX_NUMA_NODES); 539 } 540 socket_ids[num_sockets++] = sock_num; 541 } 542 if (i == rte_get_master_lcore()) 543 continue; 544 fwd_lcores_cpuids[nb_lc++] = i; 545 } 546 nb_lcores = (lcoreid_t) nb_lc; 547 nb_cfg_lcores = nb_lcores; 548 nb_fwd_lcores = 1; 549 } 550 551 static void 552 set_def_peer_eth_addrs(void) 553 { 554 portid_t i; 555 556 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 557 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 558 peer_eth_addrs[i].addr_bytes[5] = i; 559 } 560 } 561 562 static void 563 set_default_fwd_ports_config(void) 564 { 565 portid_t pt_id; 566 int i = 0; 567 568 RTE_ETH_FOREACH_DEV(pt_id) { 569 fwd_ports_ids[i++] = pt_id; 570 571 /* Update sockets info according to the attached device */ 572 int socket_id = rte_eth_dev_socket_id(pt_id); 573 if (socket_id >= 0 && new_socket_id(socket_id)) { 574 if (num_sockets >= RTE_MAX_NUMA_NODES) { 575 rte_exit(EXIT_FAILURE, 576 "Total sockets greater than %u\n", 577 RTE_MAX_NUMA_NODES); 578 } 579 socket_ids[num_sockets++] = socket_id; 580 } 581 } 582 583 nb_cfg_ports = nb_ports; 584 nb_fwd_ports = nb_ports; 585 } 586 587 void 588 set_def_fwd_config(void) 589 { 590 set_default_fwd_lcores_config(); 591 set_def_peer_eth_addrs(); 592 set_default_fwd_ports_config(); 593 } 594 595 /* extremely pessimistic estimation of memory required to create a mempool */ 596 static int 597 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) 598 { 599 unsigned int n_pages, mbuf_per_pg, leftover; 600 uint64_t total_mem, mbuf_mem, obj_sz; 601 602 /* there is no good way to predict how much space the mempool will 603 * occupy because it will allocate chunks on the fly, and some of those 604 * will come from default DPDK memory while some will come from our 605 * external memory, so just assume 128MB will be enough for everyone. 
606 */ 607 uint64_t hdr_mem = 128 << 20; 608 609 /* account for possible non-contiguousness */ 610 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); 611 if (obj_sz > pgsz) { 612 TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); 613 return -1; 614 } 615 616 mbuf_per_pg = pgsz / obj_sz; 617 leftover = (nb_mbufs % mbuf_per_pg) > 0; 618 n_pages = (nb_mbufs / mbuf_per_pg) + leftover; 619 620 mbuf_mem = n_pages * pgsz; 621 622 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); 623 624 if (total_mem > SIZE_MAX) { 625 TESTPMD_LOG(ERR, "Memory size too big\n"); 626 return -1; 627 } 628 *out = (size_t)total_mem; 629 630 return 0; 631 } 632 633 static inline uint32_t 634 bsf64(uint64_t v) 635 { 636 return (uint32_t)__builtin_ctzll(v); 637 } 638 639 static inline uint32_t 640 log2_u64(uint64_t v) 641 { 642 if (v == 0) 643 return 0; 644 v = rte_align64pow2(v); 645 return bsf64(v); 646 } 647 648 static int 649 pagesz_flags(uint64_t page_sz) 650 { 651 /* as per mmap() manpage, all page sizes are log2 of page size 652 * shifted by MAP_HUGE_SHIFT 653 */ 654 int log2 = log2_u64(page_sz); 655 656 return (log2 << HUGE_SHIFT); 657 } 658 659 static void * 660 alloc_mem(size_t memsz, size_t pgsz, bool huge) 661 { 662 void *addr; 663 int flags; 664 665 /* allocate anonymous hugepages */ 666 flags = MAP_ANONYMOUS | MAP_PRIVATE; 667 if (huge) 668 flags |= HUGE_FLAG | pagesz_flags(pgsz); 669 670 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); 671 if (addr == MAP_FAILED) 672 return NULL; 673 674 return addr; 675 } 676 677 struct extmem_param { 678 void *addr; 679 size_t len; 680 size_t pgsz; 681 rte_iova_t *iova_table; 682 unsigned int iova_table_len; 683 }; 684 685 static int 686 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, 687 bool huge) 688 { 689 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ 690 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ 691 unsigned int cur_page, n_pages, pgsz_idx; 692 size_t mem_sz, cur_pgsz; 693 rte_iova_t *iovas = NULL; 694 void *addr; 695 int ret; 696 697 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { 698 /* skip anything that is too big */ 699 if (pgsizes[pgsz_idx] > SIZE_MAX) 700 continue; 701 702 cur_pgsz = pgsizes[pgsz_idx]; 703 704 /* if we were told not to allocate hugepages, override */ 705 if (!huge) 706 cur_pgsz = sysconf(_SC_PAGESIZE); 707 708 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); 709 if (ret < 0) { 710 TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); 711 return -1; 712 } 713 714 /* allocate our memory */ 715 addr = alloc_mem(mem_sz, cur_pgsz, huge); 716 717 /* if we couldn't allocate memory with a specified page size, 718 * that doesn't mean we can't do it with other page sizes, so 719 * try another one. 
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

/*
 * Configuration initialisation done once at init time.
827 */ 828 static void 829 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 830 unsigned int socket_id) 831 { 832 char pool_name[RTE_MEMPOOL_NAMESIZE]; 833 struct rte_mempool *rte_mp = NULL; 834 uint32_t mb_size; 835 836 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 837 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 838 839 TESTPMD_LOG(INFO, 840 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 841 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 842 843 switch (mp_alloc_type) { 844 case MP_ALLOC_NATIVE: 845 { 846 /* wrapper to rte_mempool_create() */ 847 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 848 rte_mbuf_best_mempool_ops()); 849 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 850 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 851 break; 852 } 853 case MP_ALLOC_ANON: 854 { 855 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 856 mb_size, (unsigned int) mb_mempool_cache, 857 sizeof(struct rte_pktmbuf_pool_private), 858 socket_id, 0); 859 if (rte_mp == NULL) 860 goto err; 861 862 if (rte_mempool_populate_anon(rte_mp) == 0) { 863 rte_mempool_free(rte_mp); 864 rte_mp = NULL; 865 goto err; 866 } 867 rte_pktmbuf_pool_init(rte_mp, NULL); 868 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 869 break; 870 } 871 case MP_ALLOC_XMEM: 872 case MP_ALLOC_XMEM_HUGE: 873 { 874 int heap_socket; 875 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; 876 877 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) 878 rte_exit(EXIT_FAILURE, "Could not create external memory\n"); 879 880 heap_socket = 881 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 882 if (heap_socket < 0) 883 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 884 885 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 886 rte_mbuf_best_mempool_ops()); 887 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 888 mb_mempool_cache, 0, mbuf_seg_size, 889 heap_socket); 890 break; 891 } 892 default: 893 { 894 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 895 } 896 } 897 898 err: 899 if (rte_mp == NULL) { 900 rte_exit(EXIT_FAILURE, 901 "Creation of mbuf pool for socket %u failed: %s\n", 902 socket_id, rte_strerror(rte_errno)); 903 } else if (verbose_level > 0) { 904 rte_mempool_dump(stdout, rte_mp); 905 } 906 } 907 908 /* 909 * Check given socket id is valid or not with NUMA mode, 910 * if valid, return 0, else return -1 911 */ 912 static int 913 check_socket_id(const unsigned int socket_id) 914 { 915 static int warning_once = 0; 916 917 if (new_socket_id(socket_id)) { 918 if (!warning_once && numa_support) 919 printf("Warning: NUMA should be configured manually by" 920 " using --port-numa-config and" 921 " --ring-numa-config parameters along with" 922 " --numa.\n"); 923 warning_once = 1; 924 return -1; 925 } 926 return 0; 927 } 928 929 /* 930 * Get the allowed maximum number of RX queues. 931 * *pid return the port id which has minimal value of 932 * max_rx_queues in all ports. 933 */ 934 queueid_t 935 get_allowed_max_nb_rxq(portid_t *pid) 936 { 937 queueid_t allowed_max_rxq = MAX_QUEUE_ID; 938 portid_t pi; 939 struct rte_eth_dev_info dev_info; 940 941 RTE_ETH_FOREACH_DEV(pi) { 942 rte_eth_dev_info_get(pi, &dev_info); 943 if (dev_info.max_rx_queues < allowed_max_rxq) { 944 allowed_max_rxq = dev_info.max_rx_queues; 945 *pid = pi; 946 } 947 } 948 return allowed_max_rxq; 949 } 950 951 /* 952 * Check input rxq is valid or not. 953 * If input rxq is not greater than any of maximum number 954 * of RX queues of all ports, it is valid. 
955 * if valid, return 0, else return -1 956 */ 957 int 958 check_nb_rxq(queueid_t rxq) 959 { 960 queueid_t allowed_max_rxq; 961 portid_t pid = 0; 962 963 allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 964 if (rxq > allowed_max_rxq) { 965 printf("Fail: input rxq (%u) can't be greater " 966 "than max_rx_queues (%u) of port %u\n", 967 rxq, 968 allowed_max_rxq, 969 pid); 970 return -1; 971 } 972 return 0; 973 } 974 975 /* 976 * Get the allowed maximum number of TX queues. 977 * *pid return the port id which has minimal value of 978 * max_tx_queues in all ports. 979 */ 980 queueid_t 981 get_allowed_max_nb_txq(portid_t *pid) 982 { 983 queueid_t allowed_max_txq = MAX_QUEUE_ID; 984 portid_t pi; 985 struct rte_eth_dev_info dev_info; 986 987 RTE_ETH_FOREACH_DEV(pi) { 988 rte_eth_dev_info_get(pi, &dev_info); 989 if (dev_info.max_tx_queues < allowed_max_txq) { 990 allowed_max_txq = dev_info.max_tx_queues; 991 *pid = pi; 992 } 993 } 994 return allowed_max_txq; 995 } 996 997 /* 998 * Check input txq is valid or not. 999 * If input txq is not greater than any of maximum number 1000 * of TX queues of all ports, it is valid. 1001 * if valid, return 0, else return -1 1002 */ 1003 int 1004 check_nb_txq(queueid_t txq) 1005 { 1006 queueid_t allowed_max_txq; 1007 portid_t pid = 0; 1008 1009 allowed_max_txq = get_allowed_max_nb_txq(&pid); 1010 if (txq > allowed_max_txq) { 1011 printf("Fail: input txq (%u) can't be greater " 1012 "than max_tx_queues (%u) of port %u\n", 1013 txq, 1014 allowed_max_txq, 1015 pid); 1016 return -1; 1017 } 1018 return 0; 1019 } 1020 1021 static void 1022 init_config(void) 1023 { 1024 portid_t pid; 1025 struct rte_port *port; 1026 struct rte_mempool *mbp; 1027 unsigned int nb_mbuf_per_pool; 1028 lcoreid_t lc_id; 1029 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 1030 struct rte_gro_param gro_param; 1031 uint32_t gso_types; 1032 int k; 1033 1034 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 1035 1036 /* Configuration of logical cores. */ 1037 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1038 sizeof(struct fwd_lcore *) * nb_lcores, 1039 RTE_CACHE_LINE_SIZE); 1040 if (fwd_lcores == NULL) { 1041 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1042 "failed\n", nb_lcores); 1043 } 1044 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1045 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1046 sizeof(struct fwd_lcore), 1047 RTE_CACHE_LINE_SIZE); 1048 if (fwd_lcores[lc_id] == NULL) { 1049 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1050 "failed\n"); 1051 } 1052 fwd_lcores[lc_id]->cpuid_idx = lc_id; 1053 } 1054 1055 RTE_ETH_FOREACH_DEV(pid) { 1056 port = &ports[pid]; 1057 /* Apply default TxRx configuration for all ports */ 1058 port->dev_conf.txmode = tx_mode; 1059 port->dev_conf.rxmode = rx_mode; 1060 rte_eth_dev_info_get(pid, &port->dev_info); 1061 1062 if (!(port->dev_info.tx_offload_capa & 1063 DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 1064 port->dev_conf.txmode.offloads &= 1065 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 1066 if (numa_support) { 1067 if (port_numa[pid] != NUMA_NO_CONFIG) 1068 port_per_socket[port_numa[pid]]++; 1069 else { 1070 uint32_t socket_id = rte_eth_dev_socket_id(pid); 1071 1072 /* 1073 * if socket_id is invalid, 1074 * set to the first available socket. 
1075 */ 1076 if (check_socket_id(socket_id) < 0) 1077 socket_id = socket_ids[0]; 1078 port_per_socket[socket_id]++; 1079 } 1080 } 1081 1082 /* Apply Rx offloads configuration */ 1083 for (k = 0; k < port->dev_info.max_rx_queues; k++) 1084 port->rx_conf[k].offloads = 1085 port->dev_conf.rxmode.offloads; 1086 /* Apply Tx offloads configuration */ 1087 for (k = 0; k < port->dev_info.max_tx_queues; k++) 1088 port->tx_conf[k].offloads = 1089 port->dev_conf.txmode.offloads; 1090 1091 /* set flag to initialize port/queue */ 1092 port->need_reconfig = 1; 1093 port->need_reconfig_queues = 1; 1094 } 1095 1096 /* 1097 * Create pools of mbuf. 1098 * If NUMA support is disabled, create a single pool of mbuf in 1099 * socket 0 memory by default. 1100 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 1101 * 1102 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 1103 * nb_txd can be configured at run time. 1104 */ 1105 if (param_total_num_mbufs) 1106 nb_mbuf_per_pool = param_total_num_mbufs; 1107 else { 1108 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 1109 (nb_lcores * mb_mempool_cache) + 1110 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 1111 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 1112 } 1113 1114 if (numa_support) { 1115 uint8_t i; 1116 1117 for (i = 0; i < num_sockets; i++) 1118 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1119 socket_ids[i]); 1120 } else { 1121 if (socket_num == UMA_NO_CONFIG) 1122 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 1123 else 1124 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1125 socket_num); 1126 } 1127 1128 init_port_config(); 1129 1130 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1131 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 1132 /* 1133 * Records which Mbuf pool to use by each logical core, if needed. 1134 */ 1135 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1136 mbp = mbuf_pool_find( 1137 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 1138 1139 if (mbp == NULL) 1140 mbp = mbuf_pool_find(0); 1141 fwd_lcores[lc_id]->mbp = mbp; 1142 /* initialize GSO context */ 1143 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 1144 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 1145 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 1146 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 1147 ETHER_CRC_LEN; 1148 fwd_lcores[lc_id]->gso_ctx.flag = 0; 1149 } 1150 1151 /* Configuration of packet forwarding streams. 
*/ 1152 if (init_fwd_streams() < 0) 1153 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 1154 1155 fwd_config_setup(); 1156 1157 /* create a gro context for each lcore */ 1158 gro_param.gro_types = RTE_GRO_TCP_IPV4; 1159 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1160 gro_param.max_item_per_flow = MAX_PKT_BURST; 1161 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1162 gro_param.socket_id = rte_lcore_to_socket_id( 1163 fwd_lcores_cpuids[lc_id]); 1164 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1165 if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1166 rte_exit(EXIT_FAILURE, 1167 "rte_gro_ctx_create() failed\n"); 1168 } 1169 } 1170 1171 #if defined RTE_LIBRTE_PMD_SOFTNIC 1172 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 1173 RTE_ETH_FOREACH_DEV(pid) { 1174 port = &ports[pid]; 1175 const char *driver = port->dev_info.driver_name; 1176 1177 if (strcmp(driver, "net_softnic") == 0) 1178 port->softport.fwd_lcore_arg = fwd_lcores; 1179 } 1180 } 1181 #endif 1182 1183 } 1184 1185 1186 void 1187 reconfig(portid_t new_port_id, unsigned socket_id) 1188 { 1189 struct rte_port *port; 1190 1191 /* Reconfiguration of Ethernet ports. */ 1192 port = &ports[new_port_id]; 1193 rte_eth_dev_info_get(new_port_id, &port->dev_info); 1194 1195 /* set flag to initialize port/queue */ 1196 port->need_reconfig = 1; 1197 port->need_reconfig_queues = 1; 1198 port->socket_id = socket_id; 1199 1200 init_port_config(); 1201 } 1202 1203 1204 int 1205 init_fwd_streams(void) 1206 { 1207 portid_t pid; 1208 struct rte_port *port; 1209 streamid_t sm_id, nb_fwd_streams_new; 1210 queueid_t q; 1211 1212 /* set socket id according to numa or not */ 1213 RTE_ETH_FOREACH_DEV(pid) { 1214 port = &ports[pid]; 1215 if (nb_rxq > port->dev_info.max_rx_queues) { 1216 printf("Fail: nb_rxq(%d) is greater than " 1217 "max_rx_queues(%d)\n", nb_rxq, 1218 port->dev_info.max_rx_queues); 1219 return -1; 1220 } 1221 if (nb_txq > port->dev_info.max_tx_queues) { 1222 printf("Fail: nb_txq(%d) is greater than " 1223 "max_tx_queues(%d)\n", nb_txq, 1224 port->dev_info.max_tx_queues); 1225 return -1; 1226 } 1227 if (numa_support) { 1228 if (port_numa[pid] != NUMA_NO_CONFIG) 1229 port->socket_id = port_numa[pid]; 1230 else { 1231 port->socket_id = rte_eth_dev_socket_id(pid); 1232 1233 /* 1234 * if socket_id is invalid, 1235 * set to the first available socket. 
1236 */ 1237 if (check_socket_id(port->socket_id) < 0) 1238 port->socket_id = socket_ids[0]; 1239 } 1240 } 1241 else { 1242 if (socket_num == UMA_NO_CONFIG) 1243 port->socket_id = 0; 1244 else 1245 port->socket_id = socket_num; 1246 } 1247 } 1248 1249 q = RTE_MAX(nb_rxq, nb_txq); 1250 if (q == 0) { 1251 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 1252 return -1; 1253 } 1254 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1255 if (nb_fwd_streams_new == nb_fwd_streams) 1256 return 0; 1257 /* clear the old */ 1258 if (fwd_streams != NULL) { 1259 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1260 if (fwd_streams[sm_id] == NULL) 1261 continue; 1262 rte_free(fwd_streams[sm_id]); 1263 fwd_streams[sm_id] = NULL; 1264 } 1265 rte_free(fwd_streams); 1266 fwd_streams = NULL; 1267 } 1268 1269 /* init new */ 1270 nb_fwd_streams = nb_fwd_streams_new; 1271 if (nb_fwd_streams) { 1272 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 1273 sizeof(struct fwd_stream *) * nb_fwd_streams, 1274 RTE_CACHE_LINE_SIZE); 1275 if (fwd_streams == NULL) 1276 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 1277 " (struct fwd_stream *)) failed\n", 1278 nb_fwd_streams); 1279 1280 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1281 fwd_streams[sm_id] = rte_zmalloc("testpmd:" 1282 " struct fwd_stream", sizeof(struct fwd_stream), 1283 RTE_CACHE_LINE_SIZE); 1284 if (fwd_streams[sm_id] == NULL) 1285 rte_exit(EXIT_FAILURE, "rte_zmalloc" 1286 "(struct fwd_stream) failed\n"); 1287 } 1288 } 1289 1290 return 0; 1291 } 1292 1293 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1294 static void 1295 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1296 { 1297 unsigned int total_burst; 1298 unsigned int nb_burst; 1299 unsigned int burst_stats[3]; 1300 uint16_t pktnb_stats[3]; 1301 uint16_t nb_pkt; 1302 int burst_percent[3]; 1303 1304 /* 1305 * First compute the total number of packet bursts and the 1306 * two highest numbers of bursts of the same number of packets. 
1307 */ 1308 total_burst = 0; 1309 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 1310 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 1311 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1312 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 1313 if (nb_burst == 0) 1314 continue; 1315 total_burst += nb_burst; 1316 if (nb_burst > burst_stats[0]) { 1317 burst_stats[1] = burst_stats[0]; 1318 pktnb_stats[1] = pktnb_stats[0]; 1319 burst_stats[0] = nb_burst; 1320 pktnb_stats[0] = nb_pkt; 1321 } else if (nb_burst > burst_stats[1]) { 1322 burst_stats[1] = nb_burst; 1323 pktnb_stats[1] = nb_pkt; 1324 } 1325 } 1326 if (total_burst == 0) 1327 return; 1328 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 1329 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 1330 burst_percent[0], (int) pktnb_stats[0]); 1331 if (burst_stats[0] == total_burst) { 1332 printf("]\n"); 1333 return; 1334 } 1335 if (burst_stats[0] + burst_stats[1] == total_burst) { 1336 printf(" + %d%% of %d pkts]\n", 1337 100 - burst_percent[0], pktnb_stats[1]); 1338 return; 1339 } 1340 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 1341 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 1342 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 1343 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 1344 return; 1345 } 1346 printf(" + %d%% of %d pkts + %d%% of others]\n", 1347 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 1348 } 1349 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 1350 1351 static void 1352 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 1353 { 1354 struct rte_port *port; 1355 uint8_t i; 1356 1357 static const char *fwd_stats_border = "----------------------"; 1358 1359 port = &ports[port_id]; 1360 printf("\n %s Forward statistics for port %-2d %s\n", 1361 fwd_stats_border, port_id, fwd_stats_border); 1362 1363 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 1364 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1365 "%-"PRIu64"\n", 1366 stats->ipackets, stats->imissed, 1367 (uint64_t) (stats->ipackets + stats->imissed)); 1368 1369 if (cur_fwd_eng == &csum_fwd_engine) 1370 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n", 1371 port->rx_bad_ip_csum, port->rx_bad_l4_csum, 1372 port->rx_bad_outer_l4_csum); 1373 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1374 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 1375 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 1376 } 1377 1378 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1379 "%-"PRIu64"\n", 1380 stats->opackets, port->tx_dropped, 1381 (uint64_t) (stats->opackets + port->tx_dropped)); 1382 } 1383 else { 1384 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 1385 "%14"PRIu64"\n", 1386 stats->ipackets, stats->imissed, 1387 (uint64_t) (stats->ipackets + stats->imissed)); 1388 1389 if (cur_fwd_eng == &csum_fwd_engine) 1390 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n", 1391 port->rx_bad_ip_csum, port->rx_bad_l4_csum, 1392 port->rx_bad_outer_l4_csum); 1393 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1394 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 1395 printf(" RX-nombufs: %14"PRIu64"\n", 1396 stats->rx_nombuf); 1397 } 1398 1399 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 1400 "%14"PRIu64"\n", 1401 stats->opackets, port->tx_dropped, 1402 (uint64_t) 
			(stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u Rx- bad outer L4 checksum: %-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to exit
				 * this loop after the 1 second timeout expires.
1492 */ 1493 prev_tsc = rte_rdtsc(); 1494 do { 1495 nb_rx = rte_eth_rx_burst(port_id, rxq, 1496 pkts_burst, MAX_PKT_BURST); 1497 for (i = 0; i < nb_rx; i++) 1498 rte_pktmbuf_free(pkts_burst[i]); 1499 1500 cur_tsc = rte_rdtsc(); 1501 diff_tsc = cur_tsc - prev_tsc; 1502 timer_tsc += diff_tsc; 1503 } while ((nb_rx > 0) && 1504 (timer_tsc < timer_period)); 1505 timer_tsc = 0; 1506 } 1507 } 1508 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1509 } 1510 } 1511 1512 static void 1513 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1514 { 1515 struct fwd_stream **fsm; 1516 streamid_t nb_fs; 1517 streamid_t sm_id; 1518 #ifdef RTE_LIBRTE_BITRATE 1519 uint64_t tics_per_1sec; 1520 uint64_t tics_datum; 1521 uint64_t tics_current; 1522 uint16_t i, cnt_ports; 1523 1524 cnt_ports = nb_ports; 1525 tics_datum = rte_rdtsc(); 1526 tics_per_1sec = rte_get_timer_hz(); 1527 #endif 1528 fsm = &fwd_streams[fc->stream_idx]; 1529 nb_fs = fc->stream_nb; 1530 do { 1531 for (sm_id = 0; sm_id < nb_fs; sm_id++) 1532 (*pkt_fwd)(fsm[sm_id]); 1533 #ifdef RTE_LIBRTE_BITRATE 1534 if (bitrate_enabled != 0 && 1535 bitrate_lcore_id == rte_lcore_id()) { 1536 tics_current = rte_rdtsc(); 1537 if (tics_current - tics_datum >= tics_per_1sec) { 1538 /* Periodic bitrate calculation */ 1539 for (i = 0; i < cnt_ports; i++) 1540 rte_stats_bitrate_calc(bitrate_data, 1541 ports_ids[i]); 1542 tics_datum = tics_current; 1543 } 1544 } 1545 #endif 1546 #ifdef RTE_LIBRTE_LATENCY_STATS 1547 if (latencystats_enabled != 0 && 1548 latencystats_lcore_id == rte_lcore_id()) 1549 rte_latencystats_update(); 1550 #endif 1551 1552 } while (! fc->stopped); 1553 } 1554 1555 static int 1556 start_pkt_forward_on_core(void *fwd_arg) 1557 { 1558 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1559 cur_fwd_config.fwd_eng->packet_fwd); 1560 return 0; 1561 } 1562 1563 /* 1564 * Run the TXONLY packet forwarding engine to send a single burst of packets. 1565 * Used to start communication flows in network loopback test configurations. 1566 */ 1567 static int 1568 run_one_txonly_burst_on_core(void *fwd_arg) 1569 { 1570 struct fwd_lcore *fwd_lc; 1571 struct fwd_lcore tmp_lcore; 1572 1573 fwd_lc = (struct fwd_lcore *) fwd_arg; 1574 tmp_lcore = *fwd_lc; 1575 tmp_lcore.stopped = 1; 1576 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1577 return 0; 1578 } 1579 1580 /* 1581 * Launch packet forwarding: 1582 * - Setup per-port forwarding context. 1583 * - launch logical cores with their forwarding configuration. 1584 */ 1585 static void 1586 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1587 { 1588 port_fwd_begin_t port_fwd_begin; 1589 unsigned int i; 1590 unsigned int lc_id; 1591 int diag; 1592 1593 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1594 if (port_fwd_begin != NULL) { 1595 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1596 (*port_fwd_begin)(fwd_ports_ids[i]); 1597 } 1598 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1599 lc_id = fwd_lcores_cpuids[i]; 1600 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1601 fwd_lcores[i]->stopped = 0; 1602 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1603 fwd_lcores[i], lc_id); 1604 if (diag != 0) 1605 printf("launch lcore %u failed - diag=%d\n", 1606 lc_id, diag); 1607 } 1608 } 1609 } 1610 1611 /* 1612 * Update the forward ports list. 
1613 */ 1614 void 1615 update_fwd_ports(portid_t new_pid) 1616 { 1617 unsigned int i; 1618 unsigned int new_nb_fwd_ports = 0; 1619 int move = 0; 1620 1621 for (i = 0; i < nb_fwd_ports; ++i) { 1622 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) 1623 move = 1; 1624 else if (move) 1625 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; 1626 else 1627 new_nb_fwd_ports++; 1628 } 1629 if (new_pid < RTE_MAX_ETHPORTS) 1630 fwd_ports_ids[new_nb_fwd_ports++] = new_pid; 1631 1632 nb_fwd_ports = new_nb_fwd_ports; 1633 nb_cfg_ports = new_nb_fwd_ports; 1634 } 1635 1636 /* 1637 * Launch packet forwarding configuration. 1638 */ 1639 void 1640 start_packet_forwarding(int with_tx_first) 1641 { 1642 port_fwd_begin_t port_fwd_begin; 1643 port_fwd_end_t port_fwd_end; 1644 struct rte_port *port; 1645 unsigned int i; 1646 portid_t pt_id; 1647 streamid_t sm_id; 1648 1649 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1650 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1651 1652 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1653 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1654 1655 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1656 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1657 (!nb_rxq || !nb_txq)) 1658 rte_exit(EXIT_FAILURE, 1659 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1660 cur_fwd_eng->fwd_mode_name); 1661 1662 if (all_ports_started() == 0) { 1663 printf("Not all ports were started\n"); 1664 return; 1665 } 1666 if (test_done == 0) { 1667 printf("Packet forwarding already started\n"); 1668 return; 1669 } 1670 1671 1672 if(dcb_test) { 1673 for (i = 0; i < nb_fwd_ports; i++) { 1674 pt_id = fwd_ports_ids[i]; 1675 port = &ports[pt_id]; 1676 if (!port->dcb_flag) { 1677 printf("In DCB mode, all forwarding ports must " 1678 "be configured in this mode.\n"); 1679 return; 1680 } 1681 } 1682 if (nb_fwd_lcores == 1) { 1683 printf("In DCB mode,the nb forwarding cores " 1684 "should be larger than 1.\n"); 1685 return; 1686 } 1687 } 1688 test_done = 0; 1689 1690 fwd_config_setup(); 1691 1692 if(!no_flush_rx) 1693 flush_fwd_rx_queues(); 1694 1695 pkt_fwd_config_display(&cur_fwd_config); 1696 rxtx_config_display(); 1697 1698 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1699 pt_id = fwd_ports_ids[i]; 1700 port = &ports[pt_id]; 1701 rte_eth_stats_get(pt_id, &port->stats); 1702 port->tx_dropped = 0; 1703 1704 map_port_queue_stats_mapping_registers(pt_id, port); 1705 } 1706 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1707 fwd_streams[sm_id]->rx_packets = 0; 1708 fwd_streams[sm_id]->tx_packets = 0; 1709 fwd_streams[sm_id]->fwd_dropped = 0; 1710 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1711 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1712 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0; 1713 1714 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1715 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1716 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1717 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1718 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1719 #endif 1720 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1721 fwd_streams[sm_id]->core_cycles = 0; 1722 #endif 1723 } 1724 if (with_tx_first) { 1725 port_fwd_begin = tx_only_engine.port_fwd_begin; 1726 if (port_fwd_begin != NULL) { 1727 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1728 (*port_fwd_begin)(fwd_ports_ids[i]); 1729 } 1730 while (with_tx_first--) { 1731 launch_packet_forwarding( 1732 run_one_txonly_burst_on_core); 1733 
rte_eal_mp_wait_lcore(); 1734 } 1735 port_fwd_end = tx_only_engine.port_fwd_end; 1736 if (port_fwd_end != NULL) { 1737 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1738 (*port_fwd_end)(fwd_ports_ids[i]); 1739 } 1740 } 1741 launch_packet_forwarding(start_pkt_forward_on_core); 1742 } 1743 1744 void 1745 stop_packet_forwarding(void) 1746 { 1747 struct rte_eth_stats stats; 1748 struct rte_port *port; 1749 port_fwd_end_t port_fwd_end; 1750 int i; 1751 portid_t pt_id; 1752 streamid_t sm_id; 1753 lcoreid_t lc_id; 1754 uint64_t total_recv; 1755 uint64_t total_xmit; 1756 uint64_t total_rx_dropped; 1757 uint64_t total_tx_dropped; 1758 uint64_t total_rx_nombuf; 1759 uint64_t tx_dropped; 1760 uint64_t rx_bad_ip_csum; 1761 uint64_t rx_bad_l4_csum; 1762 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1763 uint64_t fwd_cycles; 1764 #endif 1765 1766 static const char *acc_stats_border = "+++++++++++++++"; 1767 1768 if (test_done) { 1769 printf("Packet forwarding not started\n"); 1770 return; 1771 } 1772 printf("Telling cores to stop..."); 1773 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1774 fwd_lcores[lc_id]->stopped = 1; 1775 printf("\nWaiting for lcores to finish...\n"); 1776 rte_eal_mp_wait_lcore(); 1777 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1778 if (port_fwd_end != NULL) { 1779 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1780 pt_id = fwd_ports_ids[i]; 1781 (*port_fwd_end)(pt_id); 1782 } 1783 } 1784 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1785 fwd_cycles = 0; 1786 #endif 1787 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1788 if (cur_fwd_config.nb_fwd_streams > 1789 cur_fwd_config.nb_fwd_ports) { 1790 fwd_stream_stats_display(sm_id); 1791 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1792 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1793 } else { 1794 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1795 fwd_streams[sm_id]; 1796 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1797 fwd_streams[sm_id]; 1798 } 1799 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1800 tx_dropped = (uint64_t) (tx_dropped + 1801 fwd_streams[sm_id]->fwd_dropped); 1802 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1803 1804 rx_bad_ip_csum = 1805 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1806 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1807 fwd_streams[sm_id]->rx_bad_ip_csum); 1808 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1809 rx_bad_ip_csum; 1810 1811 rx_bad_l4_csum = 1812 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1813 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1814 fwd_streams[sm_id]->rx_bad_l4_csum); 1815 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1816 rx_bad_l4_csum; 1817 1818 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum += 1819 fwd_streams[sm_id]->rx_bad_outer_l4_csum; 1820 1821 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1822 fwd_cycles = (uint64_t) (fwd_cycles + 1823 fwd_streams[sm_id]->core_cycles); 1824 #endif 1825 } 1826 total_recv = 0; 1827 total_xmit = 0; 1828 total_rx_dropped = 0; 1829 total_tx_dropped = 0; 1830 total_rx_nombuf = 0; 1831 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1832 pt_id = fwd_ports_ids[i]; 1833 1834 port = &ports[pt_id]; 1835 rte_eth_stats_get(pt_id, &stats); 1836 stats.ipackets -= port->stats.ipackets; 1837 port->stats.ipackets = 0; 1838 stats.opackets -= port->stats.opackets; 1839 port->stats.opackets = 0; 1840 stats.ibytes -= port->stats.ibytes; 1841 port->stats.ibytes = 0; 1842 stats.obytes -= port->stats.obytes; 1843 port->stats.obytes = 
0; 1844 stats.imissed -= port->stats.imissed; 1845 port->stats.imissed = 0; 1846 stats.oerrors -= port->stats.oerrors; 1847 port->stats.oerrors = 0; 1848 stats.rx_nombuf -= port->stats.rx_nombuf; 1849 port->stats.rx_nombuf = 0; 1850 1851 total_recv += stats.ipackets; 1852 total_xmit += stats.opackets; 1853 total_rx_dropped += stats.imissed; 1854 total_tx_dropped += port->tx_dropped; 1855 total_rx_nombuf += stats.rx_nombuf; 1856 1857 fwd_port_stats_display(pt_id, &stats); 1858 } 1859 1860 printf("\n %s Accumulated forward statistics for all ports" 1861 "%s\n", 1862 acc_stats_border, acc_stats_border); 1863 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1864 "%-"PRIu64"\n" 1865 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1866 "%-"PRIu64"\n", 1867 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1868 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1869 if (total_rx_nombuf > 0) 1870 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1871 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1872 "%s\n", 1873 acc_stats_border, acc_stats_border); 1874 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1875 if (total_recv > 0) 1876 printf("\n CPU cycles/packet=%u (total cycles=" 1877 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1878 (unsigned int)(fwd_cycles / total_recv), 1879 fwd_cycles, total_recv); 1880 #endif 1881 printf("\nDone.\n"); 1882 test_done = 1; 1883 } 1884 1885 void 1886 dev_set_link_up(portid_t pid) 1887 { 1888 if (rte_eth_dev_set_link_up(pid) < 0) 1889 printf("\nSet link up fail.\n"); 1890 } 1891 1892 void 1893 dev_set_link_down(portid_t pid) 1894 { 1895 if (rte_eth_dev_set_link_down(pid) < 0) 1896 printf("\nSet link down fail.\n"); 1897 } 1898 1899 static int 1900 all_ports_started(void) 1901 { 1902 portid_t pi; 1903 struct rte_port *port; 1904 1905 RTE_ETH_FOREACH_DEV(pi) { 1906 port = &ports[pi]; 1907 /* Check if there is a port which is not started */ 1908 if ((port->port_status != RTE_PORT_STARTED) && 1909 (port->slave_flag == 0)) 1910 return 0; 1911 } 1912 1913 /* No port is not started */ 1914 return 1; 1915 } 1916 1917 int 1918 port_is_stopped(portid_t port_id) 1919 { 1920 struct rte_port *port = &ports[port_id]; 1921 1922 if ((port->port_status != RTE_PORT_STOPPED) && 1923 (port->slave_flag == 0)) 1924 return 0; 1925 return 1; 1926 } 1927 1928 int 1929 all_ports_stopped(void) 1930 { 1931 portid_t pi; 1932 1933 RTE_ETH_FOREACH_DEV(pi) { 1934 if (!port_is_stopped(pi)) 1935 return 0; 1936 } 1937 1938 return 1; 1939 } 1940 1941 int 1942 port_is_started(portid_t port_id) 1943 { 1944 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1945 return 0; 1946 1947 if (ports[port_id].port_status != RTE_PORT_STARTED) 1948 return 0; 1949 1950 return 1; 1951 } 1952 1953 int 1954 start_port(portid_t pid) 1955 { 1956 int diag, need_check_link_status = -1; 1957 portid_t pi; 1958 queueid_t qi; 1959 struct rte_port *port; 1960 struct ether_addr mac_addr; 1961 enum rte_eth_event_type event_type; 1962 1963 if (port_id_is_invalid(pid, ENABLED_WARN)) 1964 return 0; 1965 1966 if(dcb_config) 1967 dcb_test = 1; 1968 RTE_ETH_FOREACH_DEV(pi) { 1969 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1970 continue; 1971 1972 need_check_link_status = 0; 1973 port = &ports[pi]; 1974 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1975 RTE_PORT_HANDLING) == 0) { 1976 printf("Port %d is now not stopped\n", pi); 1977 continue; 1978 } 1979 1980 if (port->need_reconfig > 0) { 1981 port->need_reconfig = 0; 1982 1983 if 
(flow_isolate_all) { 1984 int ret = port_flow_isolate(pi, 1); 1985 if (ret) { 1986 printf("Failed to apply isolated" 1987 " mode on port %d\n", pi); 1988 return -1; 1989 } 1990 } 1991 configure_rxtx_dump_callbacks(0); 1992 printf("Configuring Port %d (socket %u)\n", pi, 1993 port->socket_id); 1994 /* configure port */ 1995 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1996 &(port->dev_conf)); 1997 if (diag != 0) { 1998 if (rte_atomic16_cmpset(&(port->port_status), 1999 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2000 printf("Port %d can not be set back " 2001 "to stopped\n", pi); 2002 printf("Fail to configure port %d\n", pi); 2003 /* try to reconfigure port next time */ 2004 port->need_reconfig = 1; 2005 return -1; 2006 } 2007 } 2008 if (port->need_reconfig_queues > 0) { 2009 port->need_reconfig_queues = 0; 2010 /* setup tx queues */ 2011 for (qi = 0; qi < nb_txq; qi++) { 2012 if ((numa_support) && 2013 (txring_numa[pi] != NUMA_NO_CONFIG)) 2014 diag = rte_eth_tx_queue_setup(pi, qi, 2015 port->nb_tx_desc[qi], 2016 txring_numa[pi], 2017 &(port->tx_conf[qi])); 2018 else 2019 diag = rte_eth_tx_queue_setup(pi, qi, 2020 port->nb_tx_desc[qi], 2021 port->socket_id, 2022 &(port->tx_conf[qi])); 2023 2024 if (diag == 0) 2025 continue; 2026 2027 /* Fail to setup tx queue, return */ 2028 if (rte_atomic16_cmpset(&(port->port_status), 2029 RTE_PORT_HANDLING, 2030 RTE_PORT_STOPPED) == 0) 2031 printf("Port %d can not be set back " 2032 "to stopped\n", pi); 2033 printf("Fail to configure port %d tx queues\n", 2034 pi); 2035 /* try to reconfigure queues next time */ 2036 port->need_reconfig_queues = 1; 2037 return -1; 2038 } 2039 for (qi = 0; qi < nb_rxq; qi++) { 2040 /* setup rx queues */ 2041 if ((numa_support) && 2042 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2043 struct rte_mempool * mp = 2044 mbuf_pool_find(rxring_numa[pi]); 2045 if (mp == NULL) { 2046 printf("Failed to setup RX queue:" 2047 "No mempool allocation" 2048 " on the socket %d\n", 2049 rxring_numa[pi]); 2050 return -1; 2051 } 2052 2053 diag = rte_eth_rx_queue_setup(pi, qi, 2054 port->nb_rx_desc[qi], 2055 rxring_numa[pi], 2056 &(port->rx_conf[qi]), 2057 mp); 2058 } else { 2059 struct rte_mempool *mp = 2060 mbuf_pool_find(port->socket_id); 2061 if (mp == NULL) { 2062 printf("Failed to setup RX queue:" 2063 "No mempool allocation" 2064 " on the socket %d\n", 2065 port->socket_id); 2066 return -1; 2067 } 2068 diag = rte_eth_rx_queue_setup(pi, qi, 2069 port->nb_rx_desc[qi], 2070 port->socket_id, 2071 &(port->rx_conf[qi]), 2072 mp); 2073 } 2074 if (diag == 0) 2075 continue; 2076 2077 /* Fail to setup rx queue, return */ 2078 if (rte_atomic16_cmpset(&(port->port_status), 2079 RTE_PORT_HANDLING, 2080 RTE_PORT_STOPPED) == 0) 2081 printf("Port %d can not be set back " 2082 "to stopped\n", pi); 2083 printf("Fail to configure port %d rx queues\n", 2084 pi); 2085 /* try to reconfigure queues next time */ 2086 port->need_reconfig_queues = 1; 2087 return -1; 2088 } 2089 } 2090 configure_rxtx_dump_callbacks(verbose_level); 2091 /* start port */ 2092 if (rte_eth_dev_start(pi) < 0) { 2093 printf("Fail to start port %d\n", pi); 2094 2095 /* Fail to setup rx queue, return */ 2096 if (rte_atomic16_cmpset(&(port->port_status), 2097 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2098 printf("Port %d can not be set back to " 2099 "stopped\n", pi); 2100 continue; 2101 } 2102 2103 if (rte_atomic16_cmpset(&(port->port_status), 2104 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 2105 printf("Port %d can not be set into started\n", pi); 2106 2107 rte_eth_macaddr_get(pi, 
2090 configure_rxtx_dump_callbacks(verbose_level); 2091 /* start port */ 2092 if (rte_eth_dev_start(pi) < 0) { 2093 printf("Failed to start port %d\n", pi); 2094 2095 /* Failed to start port, set it back to stopped */ 2096 if (rte_atomic16_cmpset(&(port->port_status), 2097 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2098 printf("Port %d can not be set back to " 2099 "stopped\n", pi); 2100 continue; 2101 } 2102 2103 if (rte_atomic16_cmpset(&(port->port_status), 2104 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 2105 printf("Port %d can not be set into started\n", pi); 2106 2107 rte_eth_macaddr_get(pi, &mac_addr); 2108 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 2109 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 2110 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 2111 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 2112 2113 /* at least one port started, need to check link status */ 2114 need_check_link_status = 1; 2115 } 2116 2117 for (event_type = RTE_ETH_EVENT_UNKNOWN; 2118 event_type < RTE_ETH_EVENT_MAX; 2119 event_type++) { 2120 diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 2121 event_type, 2122 eth_event_callback, 2123 NULL); 2124 if (diag) { 2125 printf("Failed to set up event callback for event %d\n", 2126 event_type); 2127 return -1; 2128 } 2129 } 2130 2131 if (need_check_link_status == 1 && !no_link_check) 2132 check_all_ports_link_status(RTE_PORT_ALL); 2133 else if (need_check_link_status == 0) 2134 printf("Please stop the ports first\n"); 2135 2136 printf("Done\n"); 2137 return 0; 2138 } 2139 2140 void 2141 stop_port(portid_t pid) 2142 { 2143 portid_t pi; 2144 struct rte_port *port; 2145 int need_check_link_status = 0; 2146 2147 if (dcb_test) { 2148 dcb_test = 0; 2149 dcb_config = 0; 2150 } 2151 2152 if (port_id_is_invalid(pid, ENABLED_WARN)) 2153 return; 2154 2155 printf("Stopping ports...\n"); 2156 2157 RTE_ETH_FOREACH_DEV(pi) { 2158 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2159 continue; 2160 2161 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2162 printf("Please remove port %d from forwarding configuration.\n", pi); 2163 continue; 2164 } 2165 2166 if (port_is_bonding_slave(pi)) { 2167 printf("Please remove port %d from bonded device.\n", pi); 2168 continue; 2169 } 2170 2171 port = &ports[pi]; 2172 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 2173 RTE_PORT_HANDLING) == 0) 2174 continue; 2175 2176 rte_eth_dev_stop(pi); 2177 2178 if (rte_atomic16_cmpset(&(port->port_status), 2179 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2180 printf("Port %d can not be set into stopped\n", pi); 2181 need_check_link_status = 1; 2182 } 2183 if (need_check_link_status && !no_link_check) 2184 check_all_ports_link_status(RTE_PORT_ALL); 2185 2186 printf("Done\n"); 2187 } 2188 2189 static void 2190 remove_unused_fwd_ports(void) 2191 { 2192 int i; 2193 int last_port_idx = nb_ports - 1; 2194 2195 for (i = 0; i <= last_port_idx; i++) { /* iterate in ports_ids */ 2196 if (rte_eth_devices[ports_ids[i]].state != RTE_ETH_DEV_UNUSED) 2197 continue; 2198 /* skip unused ports at the end */ 2199 while (i <= last_port_idx && 2200 rte_eth_devices[ports_ids[last_port_idx]].state 2201 == RTE_ETH_DEV_UNUSED) 2202 last_port_idx--; 2203 if (last_port_idx < i) 2204 break; 2205 /* overwrite unused port with last valid port */ 2206 ports_ids[i] = ports_ids[last_port_idx]; 2207 /* decrease ports count */ 2208 last_port_idx--; 2209 } 2210 nb_ports = rte_eth_dev_count_avail(); 2211 update_fwd_ports(RTE_MAX_ETHPORTS); 2212 } 2213 2214 void 2215 close_port(portid_t pid) 2216 { 2217 portid_t pi; 2218 struct rte_port *port; 2219 2220 if (port_id_is_invalid(pid, ENABLED_WARN)) 2221 return; 2222 2223 printf("Closing ports...\n"); 2224 2225 RTE_ETH_FOREACH_DEV(pi) { 2226 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2227 continue; 2228 2229 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2230 printf("Please remove port %d from forwarding configuration.\n", pi); 2231 continue; 2232 } 2233 2234 if (port_is_bonding_slave(pi)) { 2235 printf("Please remove port %d from bonded device.\n", pi); 2236 continue; 2237 } 2238 2239 port = &ports[pi]; 2240 if
(rte_atomic16_cmpset(&(port->port_status), 2241 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 2242 printf("Port %d is already closed\n", pi); 2243 continue; 2244 } 2245 2246 if (rte_atomic16_cmpset(&(port->port_status), 2247 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 2248 printf("Port %d is now not stopped\n", pi); 2249 continue; 2250 } 2251 2252 if (port->flow_list) 2253 port_flow_flush(pi); 2254 rte_eth_dev_close(pi); 2255 2256 remove_unused_fwd_ports(); 2257 2258 if (rte_atomic16_cmpset(&(port->port_status), 2259 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 2260 printf("Port %d cannot be set to closed\n", pi); 2261 } 2262 2263 printf("Done\n"); 2264 } 2265 2266 void 2267 reset_port(portid_t pid) 2268 { 2269 int diag; 2270 portid_t pi; 2271 struct rte_port *port; 2272 2273 if (port_id_is_invalid(pid, ENABLED_WARN)) 2274 return; 2275 2276 printf("Resetting ports...\n"); 2277 2278 RTE_ETH_FOREACH_DEV(pi) { 2279 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2280 continue; 2281 2282 if (port_is_forwarding(pi) != 0 && test_done == 0) { 2283 printf("Please remove port %d from forwarding " 2284 "configuration.\n", pi); 2285 continue; 2286 } 2287 2288 if (port_is_bonding_slave(pi)) { 2289 printf("Please remove port %d from bonded device.\n", 2290 pi); 2291 continue; 2292 } 2293 2294 diag = rte_eth_dev_reset(pi); 2295 if (diag == 0) { 2296 port = &ports[pi]; 2297 port->need_reconfig = 1; 2298 port->need_reconfig_queues = 1; 2299 } else { 2300 printf("Failed to reset port %d. diag=%d\n", pi, diag); 2301 } 2302 } 2303 2304 printf("Done\n"); 2305 } 2306 2307 void 2308 attach_port(char *identifier) 2309 { 2310 portid_t pi = 0; 2311 unsigned int socket_id; 2312 2313 printf("Attaching a new port...\n"); 2314 2315 if (identifier == NULL) { 2316 printf("Invalid parameters are specified\n"); 2317 return; 2318 } 2319 2320 if (rte_eth_dev_attach(identifier, &pi)) 2321 return; 2322 2323 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 2324 /* if socket_id is invalid, set to the first available socket. */ 2325 if (check_socket_id(socket_id) < 0) 2326 socket_id = socket_ids[0]; 2327 reconfig(pi, socket_id); 2328 rte_eth_promiscuous_enable(pi); 2329 2330 ports_ids[nb_ports] = pi; 2331 nb_ports = rte_eth_dev_count_avail(); 2332 2333 ports[pi].port_status = RTE_PORT_STOPPED; 2334 2335 update_fwd_ports(pi); 2336 2337 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2338 printf("Done\n"); 2339 } 2340 2341 void 2342 detach_port(portid_t port_id) 2343 { 2344 char name[RTE_ETH_NAME_MAX_LEN]; 2345 2346 printf("Detaching a port...\n"); 2347 2348 if (ports[port_id].port_status != RTE_PORT_CLOSED) { 2349 if (ports[port_id].port_status != RTE_PORT_STOPPED) { 2350 printf("Port not stopped\n"); 2351 return; 2352 } 2353 printf("Port was not closed\n"); 2354 if (ports[port_id].flow_list) 2355 port_flow_flush(port_id); 2356 } 2357 2358 if (rte_eth_dev_detach(port_id, name)) { 2359 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); 2360 return; 2361 } 2362 2363 remove_unused_fwd_ports(); 2364 2365 printf("Port %u is detached. 
Now total ports is %d\n", 2366 port_id, nb_ports); 2367 printf("Done\n"); 2368 return; 2369 } 2370 2371 void 2372 pmd_test_exit(void) 2373 { 2374 struct rte_device *device; 2375 portid_t pt_id; 2376 int ret; 2377 2378 if (test_done == 0) 2379 stop_packet_forwarding(); 2380 2381 if (ports != NULL) { 2382 no_link_check = 1; 2383 RTE_ETH_FOREACH_DEV(pt_id) { 2384 printf("\nShutting down port %d...\n", pt_id); 2385 fflush(stdout); 2386 stop_port(pt_id); 2387 close_port(pt_id); 2388 2389 /* 2390 * This is a workaround to fix a virtio-user issue that 2391 * requires to call clean-up routine to remove existing 2392 * socket. 2393 * This workaround valid only for testpmd, needs a fix 2394 * valid for all applications. 2395 * TODO: Implement proper resource cleanup 2396 */ 2397 device = rte_eth_devices[pt_id].device; 2398 if (device && !strcmp(device->driver->name, "net_virtio_user")) 2399 detach_port(pt_id); 2400 } 2401 } 2402 2403 if (hot_plug) { 2404 ret = rte_dev_event_monitor_stop(); 2405 if (ret) { 2406 RTE_LOG(ERR, EAL, 2407 "fail to stop device event monitor."); 2408 return; 2409 } 2410 2411 ret = rte_dev_event_callback_unregister(NULL, 2412 eth_dev_event_callback, NULL); 2413 if (ret < 0) { 2414 RTE_LOG(ERR, EAL, 2415 "fail to unregister device event callback.\n"); 2416 return; 2417 } 2418 2419 ret = rte_dev_hotplug_handle_disable(); 2420 if (ret) { 2421 RTE_LOG(ERR, EAL, 2422 "fail to disable hotplug handling.\n"); 2423 return; 2424 } 2425 } 2426 2427 printf("\nBye...\n"); 2428 } 2429 2430 typedef void (*cmd_func_t)(void); 2431 struct pmd_test_command { 2432 const char *cmd_name; 2433 cmd_func_t cmd_func; 2434 }; 2435 2436 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 2437 2438 /* Check the link status of all ports in up to 9s, and print them finally */ 2439 static void 2440 check_all_ports_link_status(uint32_t port_mask) 2441 { 2442 #define CHECK_INTERVAL 100 /* 100ms */ 2443 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2444 portid_t portid; 2445 uint8_t count, all_ports_up, print_flag = 0; 2446 struct rte_eth_link link; 2447 2448 printf("Checking link statuses...\n"); 2449 fflush(stdout); 2450 for (count = 0; count <= MAX_CHECK_TIME; count++) { 2451 all_ports_up = 1; 2452 RTE_ETH_FOREACH_DEV(portid) { 2453 if ((port_mask & (1 << portid)) == 0) 2454 continue; 2455 memset(&link, 0, sizeof(link)); 2456 rte_eth_link_get_nowait(portid, &link); 2457 /* print link status if flag set */ 2458 if (print_flag == 1) { 2459 if (link.link_status) 2460 printf( 2461 "Port%d Link Up. speed %u Mbps- %s\n", 2462 portid, link.link_speed, 2463 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
2464 ("full-duplex") : ("half-duplex\n")); 2465 else 2466 printf("Port %d Link Down\n", portid); 2467 continue; 2468 } 2469 /* clear all_ports_up flag if any link down */ 2470 if (link.link_status == ETH_LINK_DOWN) { 2471 all_ports_up = 0; 2472 break; 2473 } 2474 } 2475 /* after finally printing all link status, get out */ 2476 if (print_flag == 1) 2477 break; 2478 2479 if (all_ports_up == 0) { 2480 fflush(stdout); 2481 rte_delay_ms(CHECK_INTERVAL); 2482 } 2483 2484 /* set the print_flag if all ports up or timeout */ 2485 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2486 print_flag = 1; 2487 } 2488 2489 if (lsc_interrupt) 2490 break; 2491 } 2492 } 2493 2494 static void 2495 rmv_event_callback(void *arg) 2496 { 2497 int need_to_start = 0; 2498 int org_no_link_check = no_link_check; 2499 portid_t port_id = (intptr_t)arg; 2500 2501 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2502 2503 if (!test_done && port_is_forwarding(port_id)) { 2504 need_to_start = 1; 2505 stop_packet_forwarding(); 2506 } 2507 no_link_check = 1; 2508 stop_port(port_id); 2509 no_link_check = org_no_link_check; 2510 close_port(port_id); 2511 detach_port(port_id); 2512 if (need_to_start) 2513 start_packet_forwarding(0); 2514 } 2515 2516 /* This function is used by the interrupt thread */ 2517 static int 2518 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2519 void *ret_param) 2520 { 2521 static const char * const event_desc[] = { 2522 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 2523 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 2524 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 2525 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 2526 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2527 [RTE_ETH_EVENT_IPSEC] = "IPsec", 2528 [RTE_ETH_EVENT_MACSEC] = "MACsec", 2529 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 2530 [RTE_ETH_EVENT_NEW] = "device probed", 2531 [RTE_ETH_EVENT_DESTROY] = "device released", 2532 [RTE_ETH_EVENT_MAX] = NULL, 2533 }; 2534 2535 RTE_SET_USED(param); 2536 RTE_SET_USED(ret_param); 2537 2538 if (type >= RTE_ETH_EVENT_MAX) { 2539 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 2540 port_id, __func__, type); 2541 fflush(stderr); 2542 } else if (event_print_mask & (UINT32_C(1) << type)) { 2543 printf("\nPort %" PRIu16 ": %s event\n", port_id, 2544 event_desc[type]); 2545 fflush(stdout); 2546 } 2547 2548 if (port_id_is_invalid(port_id, DISABLED_WARN)) 2549 return 0; 2550 2551 switch (type) { 2552 case RTE_ETH_EVENT_INTR_RMV: 2553 if (rte_eal_alarm_set(100000, 2554 rmv_event_callback, (void *)(intptr_t)port_id)) 2555 fprintf(stderr, "Could not set up deferred device removal\n"); 2556 break; 2557 default: 2558 break; 2559 } 2560 return 0; 2561 } 2562 2563 /* This function is used by the interrupt thread */ 2564 static void 2565 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type, 2566 __rte_unused void *arg) 2567 { 2568 uint16_t port_id; 2569 int ret; 2570 2571 if (type >= RTE_DEV_EVENT_MAX) { 2572 fprintf(stderr, "%s called upon invalid event %d\n", 2573 __func__, type); 2574 fflush(stderr); 2575 } 2576 2577 switch (type) { 2578 case RTE_DEV_EVENT_REMOVE: 2579 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2580 device_name); 2581 ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 2582 if (ret) { 2583 RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 2584 device_name); 2585 return; 2586 } 2587 rmv_event_callback((void *)(intptr_t)port_id); 2588 break; 2589 case RTE_DEV_EVENT_ADD: 2590 RTE_LOG(ERR, EAL, "The device: %s has been 
added!\n", 2591 device_name); 2592 /* TODO: After finish kernel driver binding, 2593 * begin to attach port. 2594 */ 2595 break; 2596 default: 2597 break; 2598 } 2599 } 2600 2601 static int 2602 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2603 { 2604 uint16_t i; 2605 int diag; 2606 uint8_t mapping_found = 0; 2607 2608 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2609 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2610 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2611 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2612 tx_queue_stats_mappings[i].queue_id, 2613 tx_queue_stats_mappings[i].stats_counter_id); 2614 if (diag != 0) 2615 return diag; 2616 mapping_found = 1; 2617 } 2618 } 2619 if (mapping_found) 2620 port->tx_queue_stats_mapping_enabled = 1; 2621 return 0; 2622 } 2623 2624 static int 2625 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2626 { 2627 uint16_t i; 2628 int diag; 2629 uint8_t mapping_found = 0; 2630 2631 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2632 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2633 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2634 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2635 rx_queue_stats_mappings[i].queue_id, 2636 rx_queue_stats_mappings[i].stats_counter_id); 2637 if (diag != 0) 2638 return diag; 2639 mapping_found = 1; 2640 } 2641 } 2642 if (mapping_found) 2643 port->rx_queue_stats_mapping_enabled = 1; 2644 return 0; 2645 } 2646 2647 static void 2648 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2649 { 2650 int diag = 0; 2651 2652 diag = set_tx_queue_stats_mapping_registers(pi, port); 2653 if (diag != 0) { 2654 if (diag == -ENOTSUP) { 2655 port->tx_queue_stats_mapping_enabled = 0; 2656 printf("TX queue stats mapping not supported port id=%d\n", pi); 2657 } 2658 else 2659 rte_exit(EXIT_FAILURE, 2660 "set_tx_queue_stats_mapping_registers " 2661 "failed for port id=%d diag=%d\n", 2662 pi, diag); 2663 } 2664 2665 diag = set_rx_queue_stats_mapping_registers(pi, port); 2666 if (diag != 0) { 2667 if (diag == -ENOTSUP) { 2668 port->rx_queue_stats_mapping_enabled = 0; 2669 printf("RX queue stats mapping not supported port id=%d\n", pi); 2670 } 2671 else 2672 rte_exit(EXIT_FAILURE, 2673 "set_rx_queue_stats_mapping_registers " 2674 "failed for port id=%d diag=%d\n", 2675 pi, diag); 2676 } 2677 } 2678 2679 static void 2680 rxtx_port_config(struct rte_port *port) 2681 { 2682 uint16_t qid; 2683 2684 for (qid = 0; qid < nb_rxq; qid++) { 2685 port->rx_conf[qid] = port->dev_info.default_rxconf; 2686 2687 /* Check if any Rx parameters have been passed */ 2688 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2689 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 2690 2691 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2692 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 2693 2694 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2695 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 2696 2697 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2698 port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 2699 2700 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2701 port->rx_conf[qid].rx_drop_en = rx_drop_en; 2702 2703 port->nb_rx_desc[qid] = nb_rxd; 2704 } 2705 2706 for (qid = 0; qid < nb_txq; qid++) { 2707 port->tx_conf[qid] = port->dev_info.default_txconf; 2708 2709 /* Check if any Tx parameters have been passed */ 2710 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2711 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2712 2713 if (tx_hthresh != 
RTE_PMD_PARAM_UNSET) 2714 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2715 2716 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2717 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2718 2719 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2720 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2721 2722 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2723 port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2724 2725 port->nb_tx_desc[qid] = nb_txd; 2726 } 2727 } 2728 2729 void 2730 init_port_config(void) 2731 { 2732 portid_t pid; 2733 struct rte_port *port; 2734 2735 RTE_ETH_FOREACH_DEV(pid) { 2736 port = &ports[pid]; 2737 port->dev_conf.fdir_conf = fdir_conf; 2738 rte_eth_dev_info_get(pid, &port->dev_info); 2739 if (nb_rxq > 1) { 2740 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2741 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 2742 rss_hf & port->dev_info.flow_type_rss_offloads; 2743 } else { 2744 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2745 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2746 } 2747 2748 if (port->dcb_flag == 0) { 2749 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 2750 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 2751 else 2752 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 2753 } 2754 2755 rxtx_port_config(port); 2756 2757 rte_eth_macaddr_get(pid, &port->eth_addr); 2758 2759 map_port_queue_stats_mapping_registers(pid, port); 2760 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2761 rte_pmd_ixgbe_bypass_init(pid); 2762 #endif 2763 2764 if (lsc_interrupt && 2765 (rte_eth_devices[pid].data->dev_flags & 2766 RTE_ETH_DEV_INTR_LSC)) 2767 port->dev_conf.intr_conf.lsc = 1; 2768 if (rmv_interrupt && 2769 (rte_eth_devices[pid].data->dev_flags & 2770 RTE_ETH_DEV_INTR_RMV)) 2771 port->dev_conf.intr_conf.rmv = 1; 2772 } 2773 } 2774
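/*
 * Illustrative sketch only (not part of testpmd): the RSS setup in
 * init_port_config() above keeps only the hash types that the device reports
 * in flow_type_rss_offloads. A standalone equivalent, with the hypothetical
 * helper name example_enable_rss(), might look like:
 *
 *	static void
 *	example_enable_rss(uint16_t port, struct rte_eth_conf *conf,
 *			uint64_t requested_hf)
 *	{
 *		struct rte_eth_dev_info info;
 *
 *		rte_eth_dev_info_get(port, &info);
 *		conf->rx_adv_conf.rss_conf.rss_key = NULL;
 *		conf->rx_adv_conf.rss_conf.rss_hf =
 *			requested_hf & info.flow_type_rss_offloads;
 *		conf->rxmode.mq_mode = conf->rx_adv_conf.rss_conf.rss_hf ?
 *			ETH_MQ_RX_RSS : ETH_MQ_RX_NONE;
 *	}
 */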
2775 void set_port_slave_flag(portid_t slave_pid) 2776 { 2777 struct rte_port *port; 2778 2779 port = &ports[slave_pid]; 2780 port->slave_flag = 1; 2781 } 2782 2783 void clear_port_slave_flag(portid_t slave_pid) 2784 { 2785 struct rte_port *port; 2786 2787 port = &ports[slave_pid]; 2788 port->slave_flag = 0; 2789 } 2790 2791 uint8_t port_is_bonding_slave(portid_t slave_pid) 2792 { 2793 struct rte_port *port; 2794 2795 port = &ports[slave_pid]; 2796 if ((rte_eth_devices[slave_pid].data->dev_flags & 2797 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2798 return 1; 2799 return 0; 2800 } 2801 2802 const uint16_t vlan_tags[] = { 2803 0, 1, 2, 3, 4, 5, 6, 7, 2804 8, 9, 10, 11, 12, 13, 14, 15, 2805 16, 17, 18, 19, 20, 21, 22, 23, 2806 24, 25, 26, 27, 28, 29, 30, 31 2807 }; 2808 2809 static int 2810 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 2811 enum dcb_mode_enable dcb_mode, 2812 enum rte_eth_nb_tcs num_tcs, 2813 uint8_t pfc_en) 2814 { 2815 uint8_t i; 2816 int32_t rc; 2817 struct rte_eth_rss_conf rss_conf; 2818 2819 /* 2820 * Builds up the correct configuration for dcb+vt based on the vlan tags array 2821 * given above, and the number of traffic classes available for use. 2822 */ 2823 if (dcb_mode == DCB_VT_ENABLED) { 2824 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 2825 &eth_conf->rx_adv_conf.vmdq_dcb_conf; 2826 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 2827 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2828 2829 /* VMDQ+DCB RX and TX configurations */ 2830 vmdq_rx_conf->enable_default_pool = 0; 2831 vmdq_rx_conf->default_pool = 0; 2832 vmdq_rx_conf->nb_queue_pools = 2833 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2834 vmdq_tx_conf->nb_queue_pools = 2835 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2836 2837 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 2838 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 2839 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 2840 vmdq_rx_conf->pool_map[i].pools = 2841 1 << (i % vmdq_rx_conf->nb_queue_pools); 2842 } 2843 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2844 vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2845 vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2846 } 2847 2848 /* set DCB mode of RX and TX of multiple queues */ 2849 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 2850 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2851 } else { 2852 struct rte_eth_dcb_rx_conf *rx_conf = 2853 &eth_conf->rx_adv_conf.dcb_rx_conf; 2854 struct rte_eth_dcb_tx_conf *tx_conf = 2855 &eth_conf->tx_adv_conf.dcb_tx_conf; 2856 2857 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 2858 if (rc != 0) 2859 return rc; 2860 2861 rx_conf->nb_tcs = num_tcs; 2862 tx_conf->nb_tcs = num_tcs; 2863 2864 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2865 rx_conf->dcb_tc[i] = i % num_tcs; 2866 tx_conf->dcb_tc[i] = i % num_tcs; 2867 } 2868 2869 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2870 eth_conf->rx_adv_conf.rss_conf = rss_conf; 2871 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 2872 } 2873 2874 if (pfc_en) 2875 eth_conf->dcb_capability_en = 2876 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2877 else 2878 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2879 2880 return 0; 2881 } 2882 2883 int 2884 init_port_dcb_config(portid_t pid, 2885 enum dcb_mode_enable dcb_mode, 2886 enum rte_eth_nb_tcs num_tcs, 2887 uint8_t pfc_en) 2888 { 2889 struct rte_eth_conf port_conf; 2890 struct rte_port *rte_port; 2891 int retval; 2892 uint16_t i; 2893 2894 rte_port = &ports[pid]; 2895 2896 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2897 /* Enter DCB configuration status */ 2898 dcb_config = 1; 2899 2900 port_conf.rxmode = rte_port->dev_conf.rxmode; 2901 port_conf.txmode = rte_port->dev_conf.txmode; 2902 2903 /* set configuration of DCB in VT mode and DCB in non-VT mode */ 2904 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 2905 if (retval < 0) 2906 return retval; 2907 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2908 2909 /* re-configure the device */ 2910 rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf); 2911 2912 rte_eth_dev_info_get(pid, &rte_port->dev_info); 2913 2914 /* If dev_info.vmdq_pool_base is greater than 0, 2915 * the queue ids of the VMDq pools start after the PF queues. 
2916 */ 2917 if (dcb_mode == DCB_VT_ENABLED && 2918 rte_port->dev_info.vmdq_pool_base > 0) { 2919 printf("VMDQ_DCB multi-queue mode is nonsensical" 2920 " for port %d.", pid); 2921 return -1; 2922 } 2923 2924 /* Assume the ports in testpmd have the same dcb capability 2925 * and has the same number of rxq and txq in dcb mode 2926 */ 2927 if (dcb_mode == DCB_VT_ENABLED) { 2928 if (rte_port->dev_info.max_vfs > 0) { 2929 nb_rxq = rte_port->dev_info.nb_rx_queues; 2930 nb_txq = rte_port->dev_info.nb_tx_queues; 2931 } else { 2932 nb_rxq = rte_port->dev_info.max_rx_queues; 2933 nb_txq = rte_port->dev_info.max_tx_queues; 2934 } 2935 } else { 2936 /*if vt is disabled, use all pf queues */ 2937 if (rte_port->dev_info.vmdq_pool_base == 0) { 2938 nb_rxq = rte_port->dev_info.max_rx_queues; 2939 nb_txq = rte_port->dev_info.max_tx_queues; 2940 } else { 2941 nb_rxq = (queueid_t)num_tcs; 2942 nb_txq = (queueid_t)num_tcs; 2943 2944 } 2945 } 2946 rx_free_thresh = 64; 2947 2948 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2949 2950 rxtx_port_config(rte_port); 2951 /* VLAN filter */ 2952 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2953 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2954 rx_vft_set(pid, vlan_tags[i], 1); 2955 2956 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2957 map_port_queue_stats_mapping_registers(pid, rte_port); 2958 2959 rte_port->dcb_flag = 1; 2960 2961 return 0; 2962 } 2963 2964 static void 2965 init_port(void) 2966 { 2967 /* Configuration of Ethernet ports. */ 2968 ports = rte_zmalloc("testpmd: ports", 2969 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2970 RTE_CACHE_LINE_SIZE); 2971 if (ports == NULL) { 2972 rte_exit(EXIT_FAILURE, 2973 "rte_zmalloc(%d struct rte_port) failed\n", 2974 RTE_MAX_ETHPORTS); 2975 } 2976 2977 /* Initialize ports NUMA structures */ 2978 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 2979 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 2980 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 2981 } 2982 2983 static void 2984 force_quit(void) 2985 { 2986 pmd_test_exit(); 2987 prompt_exit(); 2988 } 2989 2990 static void 2991 print_stats(void) 2992 { 2993 uint8_t i; 2994 const char clr[] = { 27, '[', '2', 'J', '\0' }; 2995 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2996 2997 /* Clear screen and move to top left */ 2998 printf("%s%s", clr, top_left); 2999 3000 printf("\nPort statistics ===================================="); 3001 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 3002 nic_stats_display(fwd_ports_ids[i]); 3003 } 3004 3005 static void 3006 signal_handler(int signum) 3007 { 3008 if (signum == SIGINT || signum == SIGTERM) { 3009 printf("\nSignal %d received, preparing to exit...\n", 3010 signum); 3011 #ifdef RTE_LIBRTE_PDUMP 3012 /* uninitialize packet capture framework */ 3013 rte_pdump_uninit(); 3014 #endif 3015 #ifdef RTE_LIBRTE_LATENCY_STATS 3016 rte_latencystats_uninit(); 3017 #endif 3018 force_quit(); 3019 /* Set flag to indicate the force termination. 
*/ 3020 f_quit = 1; 3021 /* exit with the expected status */ 3022 signal(signum, SIG_DFL); 3023 kill(getpid(), signum); 3024 } 3025 } 3026 3027 int 3028 main(int argc, char** argv) 3029 { 3030 int diag; 3031 portid_t port_id; 3032 uint16_t count; 3033 int ret; 3034 3035 signal(SIGINT, signal_handler); 3036 signal(SIGTERM, signal_handler); 3037 3038 diag = rte_eal_init(argc, argv); 3039 if (diag < 0) 3040 rte_panic("Cannot init EAL\n"); 3041 3042 testpmd_logtype = rte_log_register("testpmd"); 3043 if (testpmd_logtype < 0) 3044 rte_panic("Cannot register log type"); 3045 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 3046 3047 #ifdef RTE_LIBRTE_PDUMP 3048 /* initialize packet capture framework */ 3049 rte_pdump_init(NULL); 3050 #endif 3051 3052 count = 0; 3053 RTE_ETH_FOREACH_DEV(port_id) { 3054 ports_ids[count] = port_id; 3055 count++; 3056 } 3057 nb_ports = (portid_t) count; 3058 if (nb_ports == 0) 3059 TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 3060 3061 /* allocate port structures, and init them */ 3062 init_port(); 3063 3064 set_def_fwd_config(); 3065 if (nb_lcores == 0) 3066 rte_panic("Empty set of forwarding logical cores - check the " 3067 "core mask supplied in the command parameters\n"); 3068 3069 /* Bitrate/latency stats disabled by default */ 3070 #ifdef RTE_LIBRTE_BITRATE 3071 bitrate_enabled = 0; 3072 #endif 3073 #ifdef RTE_LIBRTE_LATENCY_STATS 3074 latencystats_enabled = 0; 3075 #endif 3076 3077 /* on FreeBSD, mlockall() is disabled by default */ 3078 #ifdef RTE_EXEC_ENV_BSDAPP 3079 do_mlockall = 0; 3080 #else 3081 do_mlockall = 1; 3082 #endif 3083 3084 argc -= diag; 3085 argv += diag; 3086 if (argc > 1) 3087 launch_args_parse(argc, argv); 3088 3089 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 3090 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 3091 strerror(errno)); 3092 } 3093 3094 if (tx_first && interactive) 3095 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 3096 "interactive mode.\n"); 3097 3098 if (tx_first && lsc_interrupt) { 3099 printf("Warning: lsc_interrupt needs to be off when " 3100 " using tx_first. 
Disabling.\n"); 3101 lsc_interrupt = 0; 3102 } 3103 3104 if (!nb_rxq && !nb_txq) 3105 printf("Warning: Either rx or tx queues should be non-zero\n"); 3106 3107 if (nb_rxq > 1 && nb_rxq > nb_txq) 3108 printf("Warning: nb_rxq=%d enables RSS configuration, " 3109 "but nb_txq=%d will prevent to fully test it.\n", 3110 nb_rxq, nb_txq); 3111 3112 init_config(); 3113 3114 if (hot_plug) { 3115 ret = rte_dev_hotplug_handle_enable(); 3116 if (ret) { 3117 RTE_LOG(ERR, EAL, 3118 "fail to enable hotplug handling."); 3119 return -1; 3120 } 3121 3122 ret = rte_dev_event_monitor_start(); 3123 if (ret) { 3124 RTE_LOG(ERR, EAL, 3125 "fail to start device event monitoring."); 3126 return -1; 3127 } 3128 3129 ret = rte_dev_event_callback_register(NULL, 3130 eth_dev_event_callback, NULL); 3131 if (ret) { 3132 RTE_LOG(ERR, EAL, 3133 "fail to register device event callback\n"); 3134 return -1; 3135 } 3136 } 3137 3138 if (start_port(RTE_PORT_ALL) != 0) 3139 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 3140 3141 /* set all ports to promiscuous mode by default */ 3142 RTE_ETH_FOREACH_DEV(port_id) 3143 rte_eth_promiscuous_enable(port_id); 3144 3145 /* Init metrics library */ 3146 rte_metrics_init(rte_socket_id()); 3147 3148 #ifdef RTE_LIBRTE_LATENCY_STATS 3149 if (latencystats_enabled != 0) { 3150 int ret = rte_latencystats_init(1, NULL); 3151 if (ret) 3152 printf("Warning: latencystats init()" 3153 " returned error %d\n", ret); 3154 printf("Latencystats running on lcore %d\n", 3155 latencystats_lcore_id); 3156 } 3157 #endif 3158 3159 /* Setup bitrate stats */ 3160 #ifdef RTE_LIBRTE_BITRATE 3161 if (bitrate_enabled != 0) { 3162 bitrate_data = rte_stats_bitrate_create(); 3163 if (bitrate_data == NULL) 3164 rte_exit(EXIT_FAILURE, 3165 "Could not allocate bitrate data.\n"); 3166 rte_stats_bitrate_reg(bitrate_data); 3167 } 3168 #endif 3169 3170 #ifdef RTE_LIBRTE_CMDLINE 3171 if (strlen(cmdline_filename) != 0) 3172 cmdline_read_from_file(cmdline_filename); 3173 3174 if (interactive == 1) { 3175 if (auto_start) { 3176 printf("Start automatic packet forwarding\n"); 3177 start_packet_forwarding(0); 3178 } 3179 prompt(); 3180 pmd_test_exit(); 3181 } else 3182 #endif 3183 { 3184 char c; 3185 int rc; 3186 3187 f_quit = 0; 3188 3189 printf("No commandline core given, start packet forwarding\n"); 3190 start_packet_forwarding(tx_first); 3191 if (stats_period != 0) { 3192 uint64_t prev_time = 0, cur_time, diff_time = 0; 3193 uint64_t timer_period; 3194 3195 /* Convert to number of cycles */ 3196 timer_period = stats_period * rte_get_timer_hz(); 3197 3198 while (f_quit == 0) { 3199 cur_time = rte_get_timer_cycles(); 3200 diff_time += cur_time - prev_time; 3201 3202 if (diff_time >= timer_period) { 3203 print_stats(); 3204 /* Reset the timer */ 3205 diff_time = 0; 3206 } 3207 /* Sleep to avoid unnecessary checks */ 3208 prev_time = cur_time; 3209 sleep(1); 3210 } 3211 } 3212 3213 printf("Press enter to exit\n"); 3214 rc = read(0, &c, 1); 3215 pmd_test_exit(); 3216 if (rc < 0) 3217 return 1; 3218 } 3219 3220 return 0; 3221 } 3222