/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the discovered CPU sockets.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to nb_ports * max(nb_rxq, nb_txq). */
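/*
 * Illustrative sizing example (not a configuration default): with 2
 * forwarding ports, nb_rxq = 2 and nb_txq = 4, init_fwd_streams() below
 * allocates 2 * max(2, 4) = 8 forwarding streams.
 */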
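/*
 * Illustrative sizing example (not a configuration default): with 2
 * forwarding ports, nb_rxq = 2 and nb_txq = 4, init_fwd_streams() below
 * allocates 2 * max(2, 4) = 8 forwarding streams.
 */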
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process cannot be terminated from outside
 * while it loops with the 'stats-period' option. This flag is set to exit the
 * stats-period loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;
/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
		struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has not been discovered yet.
 * Return a positive value if the socket is new, zero if it is already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
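/*
 * Usage sketch (illustrative only): this is how the discovery loop in
 * set_default_fwd_lcores_config() below records each socket the first
 * time it is seen:
 *
 *	unsigned int sock_num = rte_lcore_to_socket_id(lcore_id);
 *
 *	if (new_socket_id(sock_num))
 *		socket_ids[num_sockets++] = sock_num;
 */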
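/*
 * Usage sketch (illustrative only): this is how the discovery loop in
 * set_default_fwd_lcores_config() below records each socket the first
 * time it is seen:
 *
 *	unsigned int sock_num = rte_lcore_to_socket_id(lcore_id);
 *
 *	if (new_socket_id(sock_num))
 *		socket_ids[num_sockets++] = sock_num;
 */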
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
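/*
 * Illustrative call, matching what init_config() does below in UMA mode:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
 *
 * creates one pool on socket 0, named after the socket ID by
 * mbuf_poolname_build(), and aborts via rte_exit() on failure.
 */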
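/*
 * Illustrative call, matching what init_config() does below in UMA mode:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
 *
 * creates one pool on socket 0, named after the socket ID by
 * mbuf_poolname_build(), and aborts via rte_exit() on failure.
 */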
/*
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID with the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the requested number of RX queues is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID with the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the requested number of TX queues is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}
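/*
 * Usage sketch (illustrative only): a command handler is expected to
 * validate a requested queue count before applying it, e.g.:
 *
 *	if (check_nb_rxq(rxq) == 0 && check_nb_txq(txq) == 0) {
 *		nb_rxq = rxq;
 *		nb_txq = txq;
 *	}
 */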
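/*
 * Usage sketch (illustrative only): a command handler is expected to
 * validate a requested queue count before applying it, e.g.:
 *
 *	if (check_nb_rxq(rxq) == 0 && check_nb_txq(txq) == 0) {
 *		nb_rxq = rxq;
 *		nb_txq = txq;
 *	}
 */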
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
		      DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of each discovered
	 * CPU socket.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
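/*
 * Worked example of the default pool sizing in init_config() (symbolic;
 * the actual constants come from testpmd.h): without --total-num-mbufs,
 *
 *	nb_mbuf_per_pool = (RTE_TEST_RX_DESC_MAX
 *			    + nb_lcores * mb_mempool_cache
 *			    + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST)
 *			   * RTE_MAX_ETHPORTS;
 *
 * i.e. enough mbufs per socket for a full RX ring, a full TX ring, the
 * per-lcore caches and one extra burst, on every possible port.
 */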
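/*
 * Worked example of the default pool sizing in init_config() (symbolic;
 * the actual constants come from testpmd.h): without --total-num-mbufs,
 *
 *	nb_mbuf_per_pool = (RTE_TEST_RX_DESC_MAX
 *			    + nb_lcores * mb_mempool_cache
 *			    + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST)
 *			   * RTE_MAX_ETHPORTS;
 *
 * i.e. enough mbufs per socket for a full RX ring, a full TX ring, the
 * per-lcore caches and one extra burst, on every possible port.
 */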
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
"rte_zmalloc(%d" 910 " (struct fwd_stream *)) failed\n", 911 nb_fwd_streams); 912 913 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 914 fwd_streams[sm_id] = rte_zmalloc("testpmd:" 915 " struct fwd_stream", sizeof(struct fwd_stream), 916 RTE_CACHE_LINE_SIZE); 917 if (fwd_streams[sm_id] == NULL) 918 rte_exit(EXIT_FAILURE, "rte_zmalloc" 919 "(struct fwd_stream) failed\n"); 920 } 921 } 922 923 return 0; 924 } 925 926 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 927 static void 928 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 929 { 930 unsigned int total_burst; 931 unsigned int nb_burst; 932 unsigned int burst_stats[3]; 933 uint16_t pktnb_stats[3]; 934 uint16_t nb_pkt; 935 int burst_percent[3]; 936 937 /* 938 * First compute the total number of packet bursts and the 939 * two highest numbers of bursts of the same number of packets. 940 */ 941 total_burst = 0; 942 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 943 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 944 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 945 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 946 if (nb_burst == 0) 947 continue; 948 total_burst += nb_burst; 949 if (nb_burst > burst_stats[0]) { 950 burst_stats[1] = burst_stats[0]; 951 pktnb_stats[1] = pktnb_stats[0]; 952 burst_stats[0] = nb_burst; 953 pktnb_stats[0] = nb_pkt; 954 } else if (nb_burst > burst_stats[1]) { 955 burst_stats[1] = nb_burst; 956 pktnb_stats[1] = nb_pkt; 957 } 958 } 959 if (total_burst == 0) 960 return; 961 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 962 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 963 burst_percent[0], (int) pktnb_stats[0]); 964 if (burst_stats[0] == total_burst) { 965 printf("]\n"); 966 return; 967 } 968 if (burst_stats[0] + burst_stats[1] == total_burst) { 969 printf(" + %d%% of %d pkts]\n", 970 100 - burst_percent[0], pktnb_stats[1]); 971 return; 972 } 973 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 974 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 975 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 976 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 977 return; 978 } 979 printf(" + %d%% of %d pkts + %d%% of others]\n", 980 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 981 } 982 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 983 984 static void 985 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 986 { 987 struct rte_port *port; 988 uint8_t i; 989 990 static const char *fwd_stats_border = "----------------------"; 991 992 port = &ports[port_id]; 993 printf("\n %s Forward statistics for port %-2d %s\n", 994 fwd_stats_border, port_id, fwd_stats_border); 995 996 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 997 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 998 "%-"PRIu64"\n", 999 stats->ipackets, stats->imissed, 1000 (uint64_t) (stats->ipackets + stats->imissed)); 1001 1002 if (cur_fwd_eng == &csum_fwd_engine) 1003 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 1004 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 1005 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1006 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 1007 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 1008 } 1009 1010 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1011 "%-"PRIu64"\n", 1012 stats->opackets, port->tx_dropped, 1013 (uint64_t) (stats->opackets + port->tx_dropped)); 1014 } 1015 else { 1016 
printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 1017 "%14"PRIu64"\n", 1018 stats->ipackets, stats->imissed, 1019 (uint64_t) (stats->ipackets + stats->imissed)); 1020 1021 if (cur_fwd_eng == &csum_fwd_engine) 1022 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 1023 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 1024 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1025 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 1026 printf(" RX-nombufs: %14"PRIu64"\n", 1027 stats->rx_nombuf); 1028 } 1029 1030 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 1031 "%14"PRIu64"\n", 1032 stats->opackets, port->tx_dropped, 1033 (uint64_t) (stats->opackets + port->tx_dropped)); 1034 } 1035 1036 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1037 if (port->rx_stream) 1038 pkt_burst_stats_display("RX", 1039 &port->rx_stream->rx_burst_stats); 1040 if (port->tx_stream) 1041 pkt_burst_stats_display("TX", 1042 &port->tx_stream->tx_burst_stats); 1043 #endif 1044 1045 if (port->rx_queue_stats_mapping_enabled) { 1046 printf("\n"); 1047 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1048 printf(" Stats reg %2d RX-packets:%14"PRIu64 1049 " RX-errors:%14"PRIu64 1050 " RX-bytes:%14"PRIu64"\n", 1051 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 1052 } 1053 printf("\n"); 1054 } 1055 if (port->tx_queue_stats_mapping_enabled) { 1056 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1057 printf(" Stats reg %2d TX-packets:%14"PRIu64 1058 " TX-bytes:%14"PRIu64"\n", 1059 i, stats->q_opackets[i], stats->q_obytes[i]); 1060 } 1061 } 1062 1063 printf(" %s--------------------------------%s\n", 1064 fwd_stats_border, fwd_stats_border); 1065 } 1066 1067 static void 1068 fwd_stream_stats_display(streamid_t stream_id) 1069 { 1070 struct fwd_stream *fs; 1071 static const char *fwd_top_stats_border = "-------"; 1072 1073 fs = fwd_streams[stream_id]; 1074 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1075 (fs->fwd_dropped == 0)) 1076 return; 1077 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1078 "TX Port=%2d/Queue=%2d %s\n", 1079 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1080 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1081 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 1082 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1083 1084 /* if checksum mode */ 1085 if (cur_fwd_eng == &csum_fwd_engine) { 1086 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 1087 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 1088 } 1089 1090 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1091 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1092 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1093 #endif 1094 } 1095 1096 static void 1097 flush_fwd_rx_queues(void) 1098 { 1099 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1100 portid_t rxp; 1101 portid_t port_id; 1102 queueid_t rxq; 1103 uint16_t nb_rx; 1104 uint16_t i; 1105 uint8_t j; 1106 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1107 uint64_t timer_period; 1108 1109 /* convert to number of cycles */ 1110 timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1111 1112 for (j = 0; j < 2; j++) { 1113 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1114 for (rxq = 0; rxq < nb_rxq; rxq++) { 1115 port_id = fwd_ports_ids[rxp]; 1116 /** 1117 * testpmd can stuck in the below do while loop 1118 * if rte_eth_rx_burst() always returns nonzero 1119 * packets. So timer is added to exit this loop 1120 * after 1sec timer expiry. 
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. A timer is
				 * therefore used to exit the loop once a
				 * 1-second period expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
					/* restart interval measurement so that
					 * timer_tsc accumulates elapsed time
					 * instead of growing sums */
					prev_tsc = cur_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
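/*
 * The launch above follows the usual EAL master/slave pattern; a minimal
 * sketch of the same lifecycle (illustrative only):
 *
 *	fwd_lcores[i]->stopped = 0;
 *	rte_eal_remote_launch(start_pkt_forward_on_core, fwd_lcores[i], lc_id);
 *	...
 *	fwd_lcores[i]->stopped = 1;	// done by stop_packet_forwarding()
 *	rte_eal_mp_wait_lcore();
 */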
/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}
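/*
 * Illustrative example: if fwd_ports_ids is {0, 1, 2} and port 1 has become
 * invalid, the loop above compacts the array to {0, 2}; passing a valid
 * new_pid (< RTE_MAX_ETHPORTS) additionally appends that port.
 */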
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
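/*
 * Callers pass with_tx_first = 0 for plain forwarding; a positive value
 * first sends that many one-shot TXONLY bursts (the interactive
 * "start tx_first" command relies on this) before launching the
 * configured engine.
 */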
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				port->tx_conf[qi].txq_flags =
					ETH_TXQ_FLAGS_IGNORE;
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					/* index per-queue descriptor counts by
					 * queue id, not port id */
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						rxring_numa[pi],
						&(port->rx_conf[qi]),
						mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						port->socket_id,
						&(port->rx_conf[qi]),
						mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
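/*
 * Port status transitions driven above, via rte_atomic16_cmpset():
 *
 *	RTE_PORT_STOPPED -> RTE_PORT_HANDLING -> RTE_PORT_STARTED
 *
 * with a rollback to RTE_PORT_STOPPED whenever configuration, queue setup
 * or rte_eth_dev_start() fails.
 */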
started\n", pi); 1744 1745 rte_eth_macaddr_get(pi, &mac_addr); 1746 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1747 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1748 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1749 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1750 1751 /* at least one port started, need checking link status */ 1752 need_check_link_status = 1; 1753 } 1754 1755 for (event_type = RTE_ETH_EVENT_UNKNOWN; 1756 event_type < RTE_ETH_EVENT_MAX; 1757 event_type++) { 1758 diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 1759 event_type, 1760 eth_event_callback, 1761 NULL); 1762 if (diag) { 1763 printf("Failed to setup even callback for event %d\n", 1764 event_type); 1765 return -1; 1766 } 1767 } 1768 1769 if (need_check_link_status == 1 && !no_link_check) 1770 check_all_ports_link_status(RTE_PORT_ALL); 1771 else if (need_check_link_status == 0) 1772 printf("Please stop the ports first\n"); 1773 1774 printf("Done\n"); 1775 return 0; 1776 } 1777 1778 void 1779 stop_port(portid_t pid) 1780 { 1781 portid_t pi; 1782 struct rte_port *port; 1783 int need_check_link_status = 0; 1784 1785 if (dcb_test) { 1786 dcb_test = 0; 1787 dcb_config = 0; 1788 } 1789 1790 if (port_id_is_invalid(pid, ENABLED_WARN)) 1791 return; 1792 1793 printf("Stopping ports...\n"); 1794 1795 RTE_ETH_FOREACH_DEV(pi) { 1796 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1797 continue; 1798 1799 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1800 printf("Please remove port %d from forwarding configuration.\n", pi); 1801 continue; 1802 } 1803 1804 if (port_is_bonding_slave(pi)) { 1805 printf("Please remove port %d from bonded device.\n", pi); 1806 continue; 1807 } 1808 1809 port = &ports[pi]; 1810 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1811 RTE_PORT_HANDLING) == 0) 1812 continue; 1813 1814 rte_eth_dev_stop(pi); 1815 1816 if (rte_atomic16_cmpset(&(port->port_status), 1817 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1818 printf("Port %d can not be set into stopped\n", pi); 1819 need_check_link_status = 1; 1820 } 1821 if (need_check_link_status && !no_link_check) 1822 check_all_ports_link_status(RTE_PORT_ALL); 1823 1824 printf("Done\n"); 1825 } 1826 1827 void 1828 close_port(portid_t pid) 1829 { 1830 portid_t pi; 1831 struct rte_port *port; 1832 1833 if (port_id_is_invalid(pid, ENABLED_WARN)) 1834 return; 1835 1836 printf("Closing ports...\n"); 1837 1838 RTE_ETH_FOREACH_DEV(pi) { 1839 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1840 continue; 1841 1842 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1843 printf("Please remove port %d from forwarding configuration.\n", pi); 1844 continue; 1845 } 1846 1847 if (port_is_bonding_slave(pi)) { 1848 printf("Please remove port %d from bonded device.\n", pi); 1849 continue; 1850 } 1851 1852 port = &ports[pi]; 1853 if (rte_atomic16_cmpset(&(port->port_status), 1854 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1855 printf("Port %d is already closed\n", pi); 1856 continue; 1857 } 1858 1859 if (rte_atomic16_cmpset(&(port->port_status), 1860 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1861 printf("Port %d is now not stopped\n", pi); 1862 continue; 1863 } 1864 1865 if (port->flow_list) 1866 port_flow_flush(pi); 1867 rte_eth_dev_close(pi); 1868 1869 if (rte_atomic16_cmpset(&(port->port_status), 1870 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1871 printf("Port %d cannot be set to closed\n", pi); 1872 } 1873 1874 printf("Done\n"); 1875 } 1876 1877 void 1878 reset_port(portid_t pid) 1879 { 1880 int diag; 1881 portid_t pi; 1882 struct 
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

static int
eth_dev_event_callback_register(void)
{
	int ret;

	/* register the device event callback */
	ret = rte_dev_event_callback_register(NULL,
		eth_dev_event_callback, NULL);
	if (ret) {
		printf("Failed to register device event callback\n");
		return -1;
	}

	return 0;
}


static int
eth_dev_event_callback_unregister(void)
{
	int ret;

	/* unregister the device event callback */
	ret = rte_dev_event_callback_unregister(NULL,
		eth_dev_event_callback, NULL);
	if (ret < 0) {
		printf("Failed to unregister device event callback\n");
		return -1;
	}

	return 0;
}

void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count_avail();

	ports[pi].port_status = RTE_PORT_STOPPED;

	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

void
detach_port(portid_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
		return;
	}

	nb_ports = rte_eth_dev_count_avail();

	update_fwd_ports(RTE_MAX_ETHPORTS);

	printf("Port %u is detached. Now total ports is %d\n",
			port_id, nb_ports);
	printf("Done\n");
	return;
}
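/*
 * Illustrative interactive usage (the device identifier syntax depends on
 * the bus, e.g. a PCI address or a vdev name):
 *
 *	testpmd> port attach 0000:01:00.0
 *	testpmd> port stop 2
 *	testpmd> port close 2
 *	testpmd> port detach 2
 *
 * detach_port() refuses to run until the port is closed, as checked above.
 */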
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue
			 * that requires calling the clean-up routine to
			 * remove the existing socket.
			 * This workaround is valid only for testpmd; a fix
			 * valid for all applications is still needed.
			 * TODO: Implement proper resource cleanup.
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to stop device event monitor\n");

		ret = eth_dev_event_callback_unregister();
		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to unregister all event callbacks\n");
	}

	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print the final status */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up. speed %u Mbps - %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		if (lsc_interrupt)
			break;
	}
}
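
/*
 * Deferred handler for a device-removal interrupt, scheduled from
 * eth_event_callback() through rte_eal_alarm_set(): pause forwarding if
 * needed, then stop, close and detach the removed port, and finally
 * restart forwarding.
 */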
static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_IPSEC] = "IPsec",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_NEW] = "device probed",
		[RTE_ETH_EVENT_DESTROY] = "device released",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	if (port_id_is_invalid(port_id, DISABLED_WARN))
		return 0;

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}

/* This function is used by the interrupt thread */
static void
eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
		       __rte_unused void *arg)
{
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
			device_name);
		/* TODO: once failure handling is finished, stop packet
		 * forwarding, then stop, close and detach the port.
		 */
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: once the kernel driver binding is finished,
		 * attach the port.
		 */
		break;
	default:
		break;
	}
}
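
/*
 * Apply the per-queue statistics counter mappings given on the command
 * line (--tx-queue-stats-mapping / --rx-queue-stats-mapping) to the
 * hardware registers of a port. For example, mapping queue 1 of port 0
 * to counter 4 results in the call:
 *
 *	rte_eth_dev_set_tx_queue_stats_mapping(0, 1, 4);
 *
 * Drivers that do not implement the mapping return -ENOTSUP, which
 * map_port_queue_stats_mapping_registers() below treats as non-fatal.
 */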
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
				"set_tx_queue_stats_mapping_registers "
				"failed for port id=%d diag=%d\n",
				pi, diag);
		}
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
				"set_rx_queue_stats_mapping_registers "
				"failed for port id=%d diag=%d\n",
				pi, diag);
		}
	}
}

static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;

	for (qid = 0; qid < nb_rxq; qid++) {
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		port->tx_conf[qid] = port->dev_info.default_txconf;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
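
/*
 * Build the default configuration of every probed port: flow director and
 * RSS settings, per-queue Rx/Tx parameters, stats mappings and interrupt
 * flags. RSS is enabled only when more than one Rx queue is requested.
 */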
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			rte_eth_dev_info_get(pid, &dev_info);
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}

void
set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void
clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t
port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	     RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
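
/*
 * The dcb_tc arrays filled in below map each of the 8 user priorities to a
 * traffic class with a simple modulo. For example, with num_tcs == ETH_4_TCS
 * the resulting mapping is:
 *
 *	priority:      0 1 2 3 4 5 6 7
 *	traffic class: 0 1 2 3 0 1 2 3
 */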
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Build the configuration for DCB+VT based on the vlan_tags array
	 * given above and on the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
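
/*
 * Reconfigure a port for DCB testing: derive the rte_eth_conf from the
 * requested mode, adjust the global queue counts, and enable VLAN filtering
 * for every tag in vlan_tags[]. Invoked when DCB is requested from the
 * command line (the "port config <port> dcb ..." command) after the port
 * has been stopped.
 */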
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and have the same number of rxq and txq in DCB mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
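
/*
 * SIGINT/SIGTERM handler: tear down the capture and stats frameworks, run
 * the normal exit path, then re-raise the signal with the default handler
 * so the process exits with the expected status.
 */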
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate that forced termination was requested */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
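
/*
 * Entry point: initialize the EAL, parse the testpmd arguments, configure
 * and start all ports, then either hand control to the interactive prompt
 * or forward packets until the process is interrupted.
 */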
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type\n");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count_avail();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
			"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
			"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
			"but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init() returned error %d\n",
				ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}