1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <stdarg.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <signal.h> 9 #include <string.h> 10 #include <time.h> 11 #include <fcntl.h> 12 #include <sys/mman.h> 13 #include <sys/types.h> 14 #include <errno.h> 15 #include <stdbool.h> 16 17 #include <sys/queue.h> 18 #include <sys/stat.h> 19 20 #include <stdint.h> 21 #include <unistd.h> 22 #include <inttypes.h> 23 24 #include <rte_common.h> 25 #include <rte_errno.h> 26 #include <rte_byteorder.h> 27 #include <rte_log.h> 28 #include <rte_debug.h> 29 #include <rte_cycles.h> 30 #include <rte_memory.h> 31 #include <rte_memcpy.h> 32 #include <rte_launch.h> 33 #include <rte_eal.h> 34 #include <rte_alarm.h> 35 #include <rte_per_lcore.h> 36 #include <rte_lcore.h> 37 #include <rte_atomic.h> 38 #include <rte_branch_prediction.h> 39 #include <rte_mempool.h> 40 #include <rte_malloc.h> 41 #include <rte_mbuf.h> 42 #include <rte_mbuf_pool_ops.h> 43 #include <rte_interrupts.h> 44 #include <rte_pci.h> 45 #include <rte_ether.h> 46 #include <rte_ethdev.h> 47 #include <rte_dev.h> 48 #include <rte_string_fns.h> 49 #ifdef RTE_LIBRTE_IXGBE_PMD 50 #include <rte_pmd_ixgbe.h> 51 #endif 52 #ifdef RTE_LIBRTE_PDUMP 53 #include <rte_pdump.h> 54 #endif 55 #include <rte_flow.h> 56 #include <rte_metrics.h> 57 #ifdef RTE_LIBRTE_BITRATE 58 #include <rte_bitrate.h> 59 #endif 60 #ifdef RTE_LIBRTE_LATENCY_STATS 61 #include <rte_latencystats.h> 62 #endif 63 64 #include "testpmd.h" 65 66 uint16_t verbose_level = 0; /**< Silent by default. */ 67 int testpmd_logtype; /**< Log type for testpmd logs */ 68 69 /* use master core for command line ? */ 70 uint8_t interactive = 0; 71 uint8_t auto_start = 0; 72 uint8_t tx_first; 73 char cmdline_filename[PATH_MAX] = {0}; 74 75 /* 76 * NUMA support configuration. 77 * When set, the NUMA support attempts to dispatch the allocation of the 78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 79 * probed ports among the CPU sockets 0 and 1. 80 * Otherwise, all memory is allocated from CPU socket 0. 81 */ 82 uint8_t numa_support = 1; /**< numa enabled by default */ 83 84 /* 85 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 86 * not configured. 87 */ 88 uint8_t socket_num = UMA_NO_CONFIG; 89 90 /* 91 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 92 */ 93 uint8_t mp_anon = 0; 94 95 /* 96 * Store specified sockets on which memory pool to be used by ports 97 * is allocated. 98 */ 99 uint8_t port_numa[RTE_MAX_ETHPORTS]; 100 101 /* 102 * Store specified sockets on which RX ring to be used by ports 103 * is allocated. 104 */ 105 uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 106 107 /* 108 * Store specified sockets on which TX ring to be used by ports 109 * is allocated. 110 */ 111 uint8_t txring_numa[RTE_MAX_ETHPORTS]; 112 113 /* 114 * Record the Ethernet address of peer target ports to which packets are 115 * forwarded. 116 * Must be instantiated with the ethernet addresses of peer traffic generator 117 * ports. 118 */ 119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 120 portid_t nb_peer_eth_addrs = 0; 121 122 /* 123 * Probed Target Environment. 124 */ 125 struct rte_port *ports; /**< For all probed ethernet ports. */ 126 portid_t nb_ports; /**< Number of probed ethernet ports. */ 127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 129 130 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ 131 132 /* 133 * Test Forwarding Configuration. 134 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 135 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 136 */ 137 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 138 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 139 portid_t nb_cfg_ports; /**< Number of configured ports. */ 140 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 141 142 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 143 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 144 145 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 146 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 147 148 /* 149 * Forwarding engines. 150 */ 151 struct fwd_engine * fwd_engines[] = { 152 &io_fwd_engine, 153 &mac_fwd_engine, 154 &mac_swap_engine, 155 &flow_gen_engine, 156 &rx_only_engine, 157 &tx_only_engine, 158 &csum_fwd_engine, 159 &icmp_echo_engine, 160 #if defined RTE_LIBRTE_PMD_SOFTNIC 161 &softnic_fwd_engine, 162 #endif 163 #ifdef RTE_LIBRTE_IEEE1588 164 &ieee1588_fwd_engine, 165 #endif 166 NULL, 167 }; 168 169 struct fwd_config cur_fwd_config; 170 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 171 uint32_t retry_enabled; 172 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 173 uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 174 175 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 176 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 177 * specified on command-line. */ 178 uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 179 180 /* 181 * In container, it cannot terminate the process which running with 'stats-period' 182 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 183 */ 184 uint8_t f_quit; 185 186 /* 187 * Configuration of packet segments used by the "txonly" processing engine. 188 */ 189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 191 TXONLY_DEF_PACKET_LEN, 192 }; 193 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 194 195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 196 /**< Split policy for packets to TX. */ 197 198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 200 201 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 202 uint8_t dcb_config = 0; 203 204 /* Whether the dcb is in testing status */ 205 uint8_t dcb_test = 0; 206 207 /* 208 * Configurable number of RX/TX queues. 209 */ 210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 212 213 /* 214 * Configurable number of RX/TX ring descriptors. 215 * Defaults are supplied by drivers via ethdev. 216 */ 217 #define RTE_TEST_RX_DESC_DEFAULT 0 218 #define RTE_TEST_TX_DESC_DEFAULT 0 219 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 220 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 221 222 #define RTE_PMD_PARAM_UNSET -1 223 /* 224 * Configurable values of RX and TX ring threshold registers. 225 */ 226 227 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 228 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 229 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 230 231 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 232 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 233 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 234 235 /* 236 * Configurable value of RX free threshold. 237 */ 238 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 239 240 /* 241 * Configurable value of RX drop enable. 242 */ 243 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 244 245 /* 246 * Configurable value of TX free threshold. 247 */ 248 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 249 250 /* 251 * Configurable value of TX RS bit threshold. 252 */ 253 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 254 255 /* 256 * Receive Side Scaling (RSS) configuration. 257 */ 258 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 259 260 /* 261 * Port topology configuration 262 */ 263 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 264 265 /* 266 * Avoids to flush all the RX streams before starts forwarding. 267 */ 268 uint8_t no_flush_rx = 0; /* flush by default */ 269 270 /* 271 * Flow API isolated mode. 272 */ 273 uint8_t flow_isolate_all; 274 275 /* 276 * Avoids to check link status when starting/stopping a port. 277 */ 278 uint8_t no_link_check = 0; /* check by default */ 279 280 /* 281 * Enable link status change notification 282 */ 283 uint8_t lsc_interrupt = 1; /* enabled by default */ 284 285 /* 286 * Enable device removal notification. 287 */ 288 uint8_t rmv_interrupt = 1; /* enabled by default */ 289 290 uint8_t hot_plug = 0; /**< hotplug disabled by default. */ 291 292 /* 293 * Display or mask ether events 294 * Default to all events except VF_MBOX 295 */ 296 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 298 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 299 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 300 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | 301 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 302 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 303 /* 304 * Decide if all memory are locked for performance. 305 */ 306 int do_mlockall = 0; 307 308 /* 309 * NIC bypass mode configuration options. 310 */ 311 312 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 313 /* The NIC bypass watchdog timeout. */ 314 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 315 #endif 316 317 318 #ifdef RTE_LIBRTE_LATENCY_STATS 319 320 /* 321 * Set when latency stats is enabled in the commandline 322 */ 323 uint8_t latencystats_enabled; 324 325 /* 326 * Lcore ID to serive latency statistics. 327 */ 328 lcoreid_t latencystats_lcore_id = -1; 329 330 #endif 331 332 /* 333 * Ethernet device configuration. 334 */ 335 struct rte_eth_rxmode rx_mode = { 336 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 337 }; 338 339 struct rte_eth_txmode tx_mode = { 340 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 341 }; 342 343 struct rte_fdir_conf fdir_conf = { 344 .mode = RTE_FDIR_MODE_NONE, 345 .pballoc = RTE_FDIR_PBALLOC_64K, 346 .status = RTE_FDIR_REPORT_STATUS, 347 .mask = { 348 .vlan_tci_mask = 0xFFEF, 349 .ipv4_mask = { 350 .src_ip = 0xFFFFFFFF, 351 .dst_ip = 0xFFFFFFFF, 352 }, 353 .ipv6_mask = { 354 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 355 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 356 }, 357 .src_port_mask = 0xFFFF, 358 .dst_port_mask = 0xFFFF, 359 .mac_addr_byte_mask = 0xFF, 360 .tunnel_type_mask = 1, 361 .tunnel_id_mask = 0xFFFFFFFF, 362 }, 363 .drop_queue = 127, 364 }; 365 366 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 367 368 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 369 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 370 371 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 372 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 373 374 uint16_t nb_tx_queue_stats_mappings = 0; 375 uint16_t nb_rx_queue_stats_mappings = 0; 376 377 /* 378 * Display zero values by default for xstats 379 */ 380 uint8_t xstats_hide_zero; 381 382 unsigned int num_sockets = 0; 383 unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 384 385 #ifdef RTE_LIBRTE_BITRATE 386 /* Bitrate statistics */ 387 struct rte_stats_bitrates *bitrate_data; 388 lcoreid_t bitrate_lcore_id; 389 uint8_t bitrate_enabled; 390 #endif 391 392 struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 393 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 394 395 struct vxlan_encap_conf vxlan_encap_conf = { 396 .select_ipv4 = 1, 397 .select_vlan = 0, 398 .vni = "\x00\x00\x00", 399 .udp_src = 0, 400 .udp_dst = RTE_BE16(4789), 401 .ipv4_src = IPv4(127, 0, 0, 1), 402 .ipv4_dst = IPv4(255, 255, 255, 255), 403 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 404 "\x00\x00\x00\x00\x00\x00\x00\x01", 405 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 406 "\x00\x00\x00\x00\x00\x00\x11\x11", 407 .vlan_tci = 0, 408 .eth_src = "\x00\x00\x00\x00\x00\x00", 409 .eth_dst = "\xff\xff\xff\xff\xff\xff", 410 }; 411 412 struct nvgre_encap_conf nvgre_encap_conf = { 413 .select_ipv4 = 1, 414 .select_vlan = 0, 415 .tni = "\x00\x00\x00", 416 .ipv4_src = IPv4(127, 0, 0, 1), 417 .ipv4_dst = IPv4(255, 255, 255, 255), 418 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 419 "\x00\x00\x00\x00\x00\x00\x00\x01", 420 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 421 "\x00\x00\x00\x00\x00\x00\x11\x11", 422 .vlan_tci = 0, 423 .eth_src = "\x00\x00\x00\x00\x00\x00", 424 .eth_dst = "\xff\xff\xff\xff\xff\xff", 425 }; 426 427 /* Forward function declarations */ 428 static void map_port_queue_stats_mapping_registers(portid_t pi, 429 struct rte_port *port); 430 static void check_all_ports_link_status(uint32_t port_mask); 431 static int eth_event_callback(portid_t port_id, 432 enum rte_eth_event_type type, 433 void *param, void *ret_param); 434 static void eth_dev_event_callback(char *device_name, 435 enum rte_dev_event_type type, 436 void *param); 437 static int eth_dev_event_callback_register(void); 438 static int eth_dev_event_callback_unregister(void); 439 440 441 /* 442 * Check if all the ports are started. 443 * If yes, return positive value. If not, return zero. 444 */ 445 static int all_ports_started(void); 446 447 struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 448 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 449 450 /* 451 * Helper function to check if socket is already discovered. 452 * If yes, return positive value. If not, return zero. 453 */ 454 int 455 new_socket_id(unsigned int socket_id) 456 { 457 unsigned int i; 458 459 for (i = 0; i < num_sockets; i++) { 460 if (socket_ids[i] == socket_id) 461 return 0; 462 } 463 return 1; 464 } 465 466 /* 467 * Setup default configuration. 468 */ 469 static void 470 set_default_fwd_lcores_config(void) 471 { 472 unsigned int i; 473 unsigned int nb_lc; 474 unsigned int sock_num; 475 476 nb_lc = 0; 477 for (i = 0; i < RTE_MAX_LCORE; i++) { 478 if (!rte_lcore_is_enabled(i)) 479 continue; 480 sock_num = rte_lcore_to_socket_id(i); 481 if (new_socket_id(sock_num)) { 482 if (num_sockets >= RTE_MAX_NUMA_NODES) { 483 rte_exit(EXIT_FAILURE, 484 "Total sockets greater than %u\n", 485 RTE_MAX_NUMA_NODES); 486 } 487 socket_ids[num_sockets++] = sock_num; 488 } 489 if (i == rte_get_master_lcore()) 490 continue; 491 fwd_lcores_cpuids[nb_lc++] = i; 492 } 493 nb_lcores = (lcoreid_t) nb_lc; 494 nb_cfg_lcores = nb_lcores; 495 nb_fwd_lcores = 1; 496 } 497 498 static void 499 set_def_peer_eth_addrs(void) 500 { 501 portid_t i; 502 503 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 504 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 505 peer_eth_addrs[i].addr_bytes[5] = i; 506 } 507 } 508 509 static void 510 set_default_fwd_ports_config(void) 511 { 512 portid_t pt_id; 513 int i = 0; 514 515 RTE_ETH_FOREACH_DEV(pt_id) 516 fwd_ports_ids[i++] = pt_id; 517 518 nb_cfg_ports = nb_ports; 519 nb_fwd_ports = nb_ports; 520 } 521 522 void 523 set_def_fwd_config(void) 524 { 525 set_default_fwd_lcores_config(); 526 set_def_peer_eth_addrs(); 527 set_default_fwd_ports_config(); 528 } 529 530 /* 531 * Configuration initialisation done once at init time. 532 */ 533 static void 534 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 535 unsigned int socket_id) 536 { 537 char pool_name[RTE_MEMPOOL_NAMESIZE]; 538 struct rte_mempool *rte_mp = NULL; 539 uint32_t mb_size; 540 541 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 542 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 543 544 TESTPMD_LOG(INFO, 545 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 546 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 547 548 if (mp_anon != 0) { 549 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 550 mb_size, (unsigned) mb_mempool_cache, 551 sizeof(struct rte_pktmbuf_pool_private), 552 socket_id, 0); 553 if (rte_mp == NULL) 554 goto err; 555 556 if (rte_mempool_populate_anon(rte_mp) == 0) { 557 rte_mempool_free(rte_mp); 558 rte_mp = NULL; 559 goto err; 560 } 561 rte_pktmbuf_pool_init(rte_mp, NULL); 562 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 563 } else { 564 /* wrapper to rte_mempool_create() */ 565 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 566 rte_mbuf_best_mempool_ops()); 567 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 568 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 569 } 570 571 err: 572 if (rte_mp == NULL) { 573 rte_exit(EXIT_FAILURE, 574 "Creation of mbuf pool for socket %u failed: %s\n", 575 socket_id, rte_strerror(rte_errno)); 576 } else if (verbose_level > 0) { 577 rte_mempool_dump(stdout, rte_mp); 578 } 579 } 580 581 /* 582 * Check given socket id is valid or not with NUMA mode, 583 * if valid, return 0, else return -1 584 */ 585 static int 586 check_socket_id(const unsigned int socket_id) 587 { 588 static int warning_once = 0; 589 590 if (new_socket_id(socket_id)) { 591 if (!warning_once && numa_support) 592 printf("Warning: NUMA should be configured manually by" 593 " using --port-numa-config and" 594 " --ring-numa-config parameters along with" 595 " --numa.\n"); 596 warning_once = 1; 597 return -1; 598 } 599 return 0; 600 } 601 602 /* 603 * Get the allowed maximum number of RX queues. 604 * *pid return the port id which has minimal value of 605 * max_rx_queues in all ports. 606 */ 607 queueid_t 608 get_allowed_max_nb_rxq(portid_t *pid) 609 { 610 queueid_t allowed_max_rxq = MAX_QUEUE_ID; 611 portid_t pi; 612 struct rte_eth_dev_info dev_info; 613 614 RTE_ETH_FOREACH_DEV(pi) { 615 rte_eth_dev_info_get(pi, &dev_info); 616 if (dev_info.max_rx_queues < allowed_max_rxq) { 617 allowed_max_rxq = dev_info.max_rx_queues; 618 *pid = pi; 619 } 620 } 621 return allowed_max_rxq; 622 } 623 624 /* 625 * Check input rxq is valid or not. 626 * If input rxq is not greater than any of maximum number 627 * of RX queues of all ports, it is valid. 628 * if valid, return 0, else return -1 629 */ 630 int 631 check_nb_rxq(queueid_t rxq) 632 { 633 queueid_t allowed_max_rxq; 634 portid_t pid = 0; 635 636 allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 637 if (rxq > allowed_max_rxq) { 638 printf("Fail: input rxq (%u) can't be greater " 639 "than max_rx_queues (%u) of port %u\n", 640 rxq, 641 allowed_max_rxq, 642 pid); 643 return -1; 644 } 645 return 0; 646 } 647 648 /* 649 * Get the allowed maximum number of TX queues. 650 * *pid return the port id which has minimal value of 651 * max_tx_queues in all ports. 652 */ 653 queueid_t 654 get_allowed_max_nb_txq(portid_t *pid) 655 { 656 queueid_t allowed_max_txq = MAX_QUEUE_ID; 657 portid_t pi; 658 struct rte_eth_dev_info dev_info; 659 660 RTE_ETH_FOREACH_DEV(pi) { 661 rte_eth_dev_info_get(pi, &dev_info); 662 if (dev_info.max_tx_queues < allowed_max_txq) { 663 allowed_max_txq = dev_info.max_tx_queues; 664 *pid = pi; 665 } 666 } 667 return allowed_max_txq; 668 } 669 670 /* 671 * Check input txq is valid or not. 672 * If input txq is not greater than any of maximum number 673 * of TX queues of all ports, it is valid. 674 * if valid, return 0, else return -1 675 */ 676 int 677 check_nb_txq(queueid_t txq) 678 { 679 queueid_t allowed_max_txq; 680 portid_t pid = 0; 681 682 allowed_max_txq = get_allowed_max_nb_txq(&pid); 683 if (txq > allowed_max_txq) { 684 printf("Fail: input txq (%u) can't be greater " 685 "than max_tx_queues (%u) of port %u\n", 686 txq, 687 allowed_max_txq, 688 pid); 689 return -1; 690 } 691 return 0; 692 } 693 694 static void 695 init_config(void) 696 { 697 portid_t pid; 698 struct rte_port *port; 699 struct rte_mempool *mbp; 700 unsigned int nb_mbuf_per_pool; 701 lcoreid_t lc_id; 702 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 703 struct rte_gro_param gro_param; 704 uint32_t gso_types; 705 int k; 706 707 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 708 709 if (numa_support) { 710 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 711 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 712 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 713 } 714 715 /* Configuration of logical cores. */ 716 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 717 sizeof(struct fwd_lcore *) * nb_lcores, 718 RTE_CACHE_LINE_SIZE); 719 if (fwd_lcores == NULL) { 720 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 721 "failed\n", nb_lcores); 722 } 723 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 724 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 725 sizeof(struct fwd_lcore), 726 RTE_CACHE_LINE_SIZE); 727 if (fwd_lcores[lc_id] == NULL) { 728 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 729 "failed\n"); 730 } 731 fwd_lcores[lc_id]->cpuid_idx = lc_id; 732 } 733 734 RTE_ETH_FOREACH_DEV(pid) { 735 port = &ports[pid]; 736 /* Apply default TxRx configuration for all ports */ 737 port->dev_conf.txmode = tx_mode; 738 port->dev_conf.rxmode = rx_mode; 739 rte_eth_dev_info_get(pid, &port->dev_info); 740 741 if (!(port->dev_info.tx_offload_capa & 742 DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 743 port->dev_conf.txmode.offloads &= 744 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 745 if (numa_support) { 746 if (port_numa[pid] != NUMA_NO_CONFIG) 747 port_per_socket[port_numa[pid]]++; 748 else { 749 uint32_t socket_id = rte_eth_dev_socket_id(pid); 750 751 /* if socket_id is invalid, set to 0 */ 752 if (check_socket_id(socket_id) < 0) 753 socket_id = 0; 754 port_per_socket[socket_id]++; 755 } 756 } 757 758 /* Apply Rx offloads configuration */ 759 for (k = 0; k < port->dev_info.max_rx_queues; k++) 760 port->rx_conf[k].offloads = 761 port->dev_conf.rxmode.offloads; 762 /* Apply Tx offloads configuration */ 763 for (k = 0; k < port->dev_info.max_tx_queues; k++) 764 port->tx_conf[k].offloads = 765 port->dev_conf.txmode.offloads; 766 767 /* set flag to initialize port/queue */ 768 port->need_reconfig = 1; 769 port->need_reconfig_queues = 1; 770 } 771 772 /* 773 * Create pools of mbuf. 774 * If NUMA support is disabled, create a single pool of mbuf in 775 * socket 0 memory by default. 776 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 777 * 778 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 779 * nb_txd can be configured at run time. 780 */ 781 if (param_total_num_mbufs) 782 nb_mbuf_per_pool = param_total_num_mbufs; 783 else { 784 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 785 (nb_lcores * mb_mempool_cache) + 786 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 787 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 788 } 789 790 if (numa_support) { 791 uint8_t i; 792 793 for (i = 0; i < num_sockets; i++) 794 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 795 socket_ids[i]); 796 } else { 797 if (socket_num == UMA_NO_CONFIG) 798 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 799 else 800 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 801 socket_num); 802 } 803 804 init_port_config(); 805 806 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 807 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 808 /* 809 * Records which Mbuf pool to use by each logical core, if needed. 810 */ 811 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 812 mbp = mbuf_pool_find( 813 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 814 815 if (mbp == NULL) 816 mbp = mbuf_pool_find(0); 817 fwd_lcores[lc_id]->mbp = mbp; 818 /* initialize GSO context */ 819 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 820 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 821 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 822 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 823 ETHER_CRC_LEN; 824 fwd_lcores[lc_id]->gso_ctx.flag = 0; 825 } 826 827 /* Configuration of packet forwarding streams. */ 828 if (init_fwd_streams() < 0) 829 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 830 831 fwd_config_setup(); 832 833 /* create a gro context for each lcore */ 834 gro_param.gro_types = RTE_GRO_TCP_IPV4; 835 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 836 gro_param.max_item_per_flow = MAX_PKT_BURST; 837 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 838 gro_param.socket_id = rte_lcore_to_socket_id( 839 fwd_lcores_cpuids[lc_id]); 840 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 841 if (fwd_lcores[lc_id]->gro_ctx == NULL) { 842 rte_exit(EXIT_FAILURE, 843 "rte_gro_ctx_create() failed\n"); 844 } 845 } 846 847 #if defined RTE_LIBRTE_PMD_SOFTNIC 848 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 849 RTE_ETH_FOREACH_DEV(pid) { 850 port = &ports[pid]; 851 const char *driver = port->dev_info.driver_name; 852 853 if (strcmp(driver, "net_softnic") == 0) 854 port->softport.fwd_lcore_arg = fwd_lcores; 855 } 856 } 857 #endif 858 859 } 860 861 862 void 863 reconfig(portid_t new_port_id, unsigned socket_id) 864 { 865 struct rte_port *port; 866 867 /* Reconfiguration of Ethernet ports. */ 868 port = &ports[new_port_id]; 869 rte_eth_dev_info_get(new_port_id, &port->dev_info); 870 871 /* set flag to initialize port/queue */ 872 port->need_reconfig = 1; 873 port->need_reconfig_queues = 1; 874 port->socket_id = socket_id; 875 876 init_port_config(); 877 } 878 879 880 int 881 init_fwd_streams(void) 882 { 883 portid_t pid; 884 struct rte_port *port; 885 streamid_t sm_id, nb_fwd_streams_new; 886 queueid_t q; 887 888 /* set socket id according to numa or not */ 889 RTE_ETH_FOREACH_DEV(pid) { 890 port = &ports[pid]; 891 if (nb_rxq > port->dev_info.max_rx_queues) { 892 printf("Fail: nb_rxq(%d) is greater than " 893 "max_rx_queues(%d)\n", nb_rxq, 894 port->dev_info.max_rx_queues); 895 return -1; 896 } 897 if (nb_txq > port->dev_info.max_tx_queues) { 898 printf("Fail: nb_txq(%d) is greater than " 899 "max_tx_queues(%d)\n", nb_txq, 900 port->dev_info.max_tx_queues); 901 return -1; 902 } 903 if (numa_support) { 904 if (port_numa[pid] != NUMA_NO_CONFIG) 905 port->socket_id = port_numa[pid]; 906 else { 907 port->socket_id = rte_eth_dev_socket_id(pid); 908 909 /* if socket_id is invalid, set to 0 */ 910 if (check_socket_id(port->socket_id) < 0) 911 port->socket_id = 0; 912 } 913 } 914 else { 915 if (socket_num == UMA_NO_CONFIG) 916 port->socket_id = 0; 917 else 918 port->socket_id = socket_num; 919 } 920 } 921 922 q = RTE_MAX(nb_rxq, nb_txq); 923 if (q == 0) { 924 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 925 return -1; 926 } 927 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 928 if (nb_fwd_streams_new == nb_fwd_streams) 929 return 0; 930 /* clear the old */ 931 if (fwd_streams != NULL) { 932 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 933 if (fwd_streams[sm_id] == NULL) 934 continue; 935 rte_free(fwd_streams[sm_id]); 936 fwd_streams[sm_id] = NULL; 937 } 938 rte_free(fwd_streams); 939 fwd_streams = NULL; 940 } 941 942 /* init new */ 943 nb_fwd_streams = nb_fwd_streams_new; 944 if (nb_fwd_streams) { 945 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 946 sizeof(struct fwd_stream *) * nb_fwd_streams, 947 RTE_CACHE_LINE_SIZE); 948 if (fwd_streams == NULL) 949 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 950 " (struct fwd_stream *)) failed\n", 951 nb_fwd_streams); 952 953 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 954 fwd_streams[sm_id] = rte_zmalloc("testpmd:" 955 " struct fwd_stream", sizeof(struct fwd_stream), 956 RTE_CACHE_LINE_SIZE); 957 if (fwd_streams[sm_id] == NULL) 958 rte_exit(EXIT_FAILURE, "rte_zmalloc" 959 "(struct fwd_stream) failed\n"); 960 } 961 } 962 963 return 0; 964 } 965 966 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 967 static void 968 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 969 { 970 unsigned int total_burst; 971 unsigned int nb_burst; 972 unsigned int burst_stats[3]; 973 uint16_t pktnb_stats[3]; 974 uint16_t nb_pkt; 975 int burst_percent[3]; 976 977 /* 978 * First compute the total number of packet bursts and the 979 * two highest numbers of bursts of the same number of packets. 980 */ 981 total_burst = 0; 982 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 983 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 984 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 985 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 986 if (nb_burst == 0) 987 continue; 988 total_burst += nb_burst; 989 if (nb_burst > burst_stats[0]) { 990 burst_stats[1] = burst_stats[0]; 991 pktnb_stats[1] = pktnb_stats[0]; 992 burst_stats[0] = nb_burst; 993 pktnb_stats[0] = nb_pkt; 994 } else if (nb_burst > burst_stats[1]) { 995 burst_stats[1] = nb_burst; 996 pktnb_stats[1] = nb_pkt; 997 } 998 } 999 if (total_burst == 0) 1000 return; 1001 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 1002 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 1003 burst_percent[0], (int) pktnb_stats[0]); 1004 if (burst_stats[0] == total_burst) { 1005 printf("]\n"); 1006 return; 1007 } 1008 if (burst_stats[0] + burst_stats[1] == total_burst) { 1009 printf(" + %d%% of %d pkts]\n", 1010 100 - burst_percent[0], pktnb_stats[1]); 1011 return; 1012 } 1013 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 1014 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 1015 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 1016 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 1017 return; 1018 } 1019 printf(" + %d%% of %d pkts + %d%% of others]\n", 1020 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 1021 } 1022 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 1023 1024 static void 1025 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 1026 { 1027 struct rte_port *port; 1028 uint8_t i; 1029 1030 static const char *fwd_stats_border = "----------------------"; 1031 1032 port = &ports[port_id]; 1033 printf("\n %s Forward statistics for port %-2d %s\n", 1034 fwd_stats_border, port_id, fwd_stats_border); 1035 1036 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 1037 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1038 "%-"PRIu64"\n", 1039 stats->ipackets, stats->imissed, 1040 (uint64_t) (stats->ipackets + stats->imissed)); 1041 1042 if (cur_fwd_eng == &csum_fwd_engine) 1043 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 1044 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 1045 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1046 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 1047 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 1048 } 1049 1050 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1051 "%-"PRIu64"\n", 1052 stats->opackets, port->tx_dropped, 1053 (uint64_t) (stats->opackets + port->tx_dropped)); 1054 } 1055 else { 1056 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 1057 "%14"PRIu64"\n", 1058 stats->ipackets, stats->imissed, 1059 (uint64_t) (stats->ipackets + stats->imissed)); 1060 1061 if (cur_fwd_eng == &csum_fwd_engine) 1062 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 1063 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 1064 if ((stats->ierrors + stats->rx_nombuf) > 0) { 1065 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 1066 printf(" RX-nombufs: %14"PRIu64"\n", 1067 stats->rx_nombuf); 1068 } 1069 1070 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 1071 "%14"PRIu64"\n", 1072 stats->opackets, port->tx_dropped, 1073 (uint64_t) (stats->opackets + port->tx_dropped)); 1074 } 1075 1076 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1077 if (port->rx_stream) 1078 pkt_burst_stats_display("RX", 1079 &port->rx_stream->rx_burst_stats); 1080 if (port->tx_stream) 1081 pkt_burst_stats_display("TX", 1082 &port->tx_stream->tx_burst_stats); 1083 #endif 1084 1085 if (port->rx_queue_stats_mapping_enabled) { 1086 printf("\n"); 1087 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1088 printf(" Stats reg %2d RX-packets:%14"PRIu64 1089 " RX-errors:%14"PRIu64 1090 " RX-bytes:%14"PRIu64"\n", 1091 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 1092 } 1093 printf("\n"); 1094 } 1095 if (port->tx_queue_stats_mapping_enabled) { 1096 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1097 printf(" Stats reg %2d TX-packets:%14"PRIu64 1098 " TX-bytes:%14"PRIu64"\n", 1099 i, stats->q_opackets[i], stats->q_obytes[i]); 1100 } 1101 } 1102 1103 printf(" %s--------------------------------%s\n", 1104 fwd_stats_border, fwd_stats_border); 1105 } 1106 1107 static void 1108 fwd_stream_stats_display(streamid_t stream_id) 1109 { 1110 struct fwd_stream *fs; 1111 static const char *fwd_top_stats_border = "-------"; 1112 1113 fs = fwd_streams[stream_id]; 1114 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1115 (fs->fwd_dropped == 0)) 1116 return; 1117 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1118 "TX Port=%2d/Queue=%2d %s\n", 1119 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1120 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1121 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 1122 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1123 1124 /* if checksum mode */ 1125 if (cur_fwd_eng == &csum_fwd_engine) { 1126 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 1127 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 1128 } 1129 1130 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1131 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1132 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1133 #endif 1134 } 1135 1136 static void 1137 flush_fwd_rx_queues(void) 1138 { 1139 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1140 portid_t rxp; 1141 portid_t port_id; 1142 queueid_t rxq; 1143 uint16_t nb_rx; 1144 uint16_t i; 1145 uint8_t j; 1146 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1147 uint64_t timer_period; 1148 1149 /* convert to number of cycles */ 1150 timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1151 1152 for (j = 0; j < 2; j++) { 1153 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1154 for (rxq = 0; rxq < nb_rxq; rxq++) { 1155 port_id = fwd_ports_ids[rxp]; 1156 /** 1157 * testpmd can stuck in the below do while loop 1158 * if rte_eth_rx_burst() always returns nonzero 1159 * packets. So timer is added to exit this loop 1160 * after 1sec timer expiry. 1161 */ 1162 prev_tsc = rte_rdtsc(); 1163 do { 1164 nb_rx = rte_eth_rx_burst(port_id, rxq, 1165 pkts_burst, MAX_PKT_BURST); 1166 for (i = 0; i < nb_rx; i++) 1167 rte_pktmbuf_free(pkts_burst[i]); 1168 1169 cur_tsc = rte_rdtsc(); 1170 diff_tsc = cur_tsc - prev_tsc; 1171 timer_tsc += diff_tsc; 1172 } while ((nb_rx > 0) && 1173 (timer_tsc < timer_period)); 1174 timer_tsc = 0; 1175 } 1176 } 1177 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1178 } 1179 } 1180 1181 static void 1182 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1183 { 1184 struct fwd_stream **fsm; 1185 streamid_t nb_fs; 1186 streamid_t sm_id; 1187 #ifdef RTE_LIBRTE_BITRATE 1188 uint64_t tics_per_1sec; 1189 uint64_t tics_datum; 1190 uint64_t tics_current; 1191 uint16_t i, cnt_ports; 1192 1193 cnt_ports = nb_ports; 1194 tics_datum = rte_rdtsc(); 1195 tics_per_1sec = rte_get_timer_hz(); 1196 #endif 1197 fsm = &fwd_streams[fc->stream_idx]; 1198 nb_fs = fc->stream_nb; 1199 do { 1200 for (sm_id = 0; sm_id < nb_fs; sm_id++) 1201 (*pkt_fwd)(fsm[sm_id]); 1202 #ifdef RTE_LIBRTE_BITRATE 1203 if (bitrate_enabled != 0 && 1204 bitrate_lcore_id == rte_lcore_id()) { 1205 tics_current = rte_rdtsc(); 1206 if (tics_current - tics_datum >= tics_per_1sec) { 1207 /* Periodic bitrate calculation */ 1208 for (i = 0; i < cnt_ports; i++) 1209 rte_stats_bitrate_calc(bitrate_data, 1210 ports_ids[i]); 1211 tics_datum = tics_current; 1212 } 1213 } 1214 #endif 1215 #ifdef RTE_LIBRTE_LATENCY_STATS 1216 if (latencystats_enabled != 0 && 1217 latencystats_lcore_id == rte_lcore_id()) 1218 rte_latencystats_update(); 1219 #endif 1220 1221 } while (! fc->stopped); 1222 } 1223 1224 static int 1225 start_pkt_forward_on_core(void *fwd_arg) 1226 { 1227 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1228 cur_fwd_config.fwd_eng->packet_fwd); 1229 return 0; 1230 } 1231 1232 /* 1233 * Run the TXONLY packet forwarding engine to send a single burst of packets. 1234 * Used to start communication flows in network loopback test configurations. 1235 */ 1236 static int 1237 run_one_txonly_burst_on_core(void *fwd_arg) 1238 { 1239 struct fwd_lcore *fwd_lc; 1240 struct fwd_lcore tmp_lcore; 1241 1242 fwd_lc = (struct fwd_lcore *) fwd_arg; 1243 tmp_lcore = *fwd_lc; 1244 tmp_lcore.stopped = 1; 1245 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1246 return 0; 1247 } 1248 1249 /* 1250 * Launch packet forwarding: 1251 * - Setup per-port forwarding context. 1252 * - launch logical cores with their forwarding configuration. 1253 */ 1254 static void 1255 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1256 { 1257 port_fwd_begin_t port_fwd_begin; 1258 unsigned int i; 1259 unsigned int lc_id; 1260 int diag; 1261 1262 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1263 if (port_fwd_begin != NULL) { 1264 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1265 (*port_fwd_begin)(fwd_ports_ids[i]); 1266 } 1267 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1268 lc_id = fwd_lcores_cpuids[i]; 1269 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1270 fwd_lcores[i]->stopped = 0; 1271 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1272 fwd_lcores[i], lc_id); 1273 if (diag != 0) 1274 printf("launch lcore %u failed - diag=%d\n", 1275 lc_id, diag); 1276 } 1277 } 1278 } 1279 1280 /* 1281 * Update the forward ports list. 1282 */ 1283 void 1284 update_fwd_ports(portid_t new_pid) 1285 { 1286 unsigned int i; 1287 unsigned int new_nb_fwd_ports = 0; 1288 int move = 0; 1289 1290 for (i = 0; i < nb_fwd_ports; ++i) { 1291 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) 1292 move = 1; 1293 else if (move) 1294 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; 1295 else 1296 new_nb_fwd_ports++; 1297 } 1298 if (new_pid < RTE_MAX_ETHPORTS) 1299 fwd_ports_ids[new_nb_fwd_ports++] = new_pid; 1300 1301 nb_fwd_ports = new_nb_fwd_ports; 1302 nb_cfg_ports = new_nb_fwd_ports; 1303 } 1304 1305 /* 1306 * Launch packet forwarding configuration. 1307 */ 1308 void 1309 start_packet_forwarding(int with_tx_first) 1310 { 1311 port_fwd_begin_t port_fwd_begin; 1312 port_fwd_end_t port_fwd_end; 1313 struct rte_port *port; 1314 unsigned int i; 1315 portid_t pt_id; 1316 streamid_t sm_id; 1317 1318 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1319 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1320 1321 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1322 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1323 1324 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1325 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1326 (!nb_rxq || !nb_txq)) 1327 rte_exit(EXIT_FAILURE, 1328 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1329 cur_fwd_eng->fwd_mode_name); 1330 1331 if (all_ports_started() == 0) { 1332 printf("Not all ports were started\n"); 1333 return; 1334 } 1335 if (test_done == 0) { 1336 printf("Packet forwarding already started\n"); 1337 return; 1338 } 1339 1340 1341 if(dcb_test) { 1342 for (i = 0; i < nb_fwd_ports; i++) { 1343 pt_id = fwd_ports_ids[i]; 1344 port = &ports[pt_id]; 1345 if (!port->dcb_flag) { 1346 printf("In DCB mode, all forwarding ports must " 1347 "be configured in this mode.\n"); 1348 return; 1349 } 1350 } 1351 if (nb_fwd_lcores == 1) { 1352 printf("In DCB mode,the nb forwarding cores " 1353 "should be larger than 1.\n"); 1354 return; 1355 } 1356 } 1357 test_done = 0; 1358 1359 fwd_config_setup(); 1360 1361 if(!no_flush_rx) 1362 flush_fwd_rx_queues(); 1363 1364 pkt_fwd_config_display(&cur_fwd_config); 1365 rxtx_config_display(); 1366 1367 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1368 pt_id = fwd_ports_ids[i]; 1369 port = &ports[pt_id]; 1370 rte_eth_stats_get(pt_id, &port->stats); 1371 port->tx_dropped = 0; 1372 1373 map_port_queue_stats_mapping_registers(pt_id, port); 1374 } 1375 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1376 fwd_streams[sm_id]->rx_packets = 0; 1377 fwd_streams[sm_id]->tx_packets = 0; 1378 fwd_streams[sm_id]->fwd_dropped = 0; 1379 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1380 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1381 1382 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1383 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1384 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1385 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1386 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1387 #endif 1388 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1389 fwd_streams[sm_id]->core_cycles = 0; 1390 #endif 1391 } 1392 if (with_tx_first) { 1393 port_fwd_begin = tx_only_engine.port_fwd_begin; 1394 if (port_fwd_begin != NULL) { 1395 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1396 (*port_fwd_begin)(fwd_ports_ids[i]); 1397 } 1398 while (with_tx_first--) { 1399 launch_packet_forwarding( 1400 run_one_txonly_burst_on_core); 1401 rte_eal_mp_wait_lcore(); 1402 } 1403 port_fwd_end = tx_only_engine.port_fwd_end; 1404 if (port_fwd_end != NULL) { 1405 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1406 (*port_fwd_end)(fwd_ports_ids[i]); 1407 } 1408 } 1409 launch_packet_forwarding(start_pkt_forward_on_core); 1410 } 1411 1412 void 1413 stop_packet_forwarding(void) 1414 { 1415 struct rte_eth_stats stats; 1416 struct rte_port *port; 1417 port_fwd_end_t port_fwd_end; 1418 int i; 1419 portid_t pt_id; 1420 streamid_t sm_id; 1421 lcoreid_t lc_id; 1422 uint64_t total_recv; 1423 uint64_t total_xmit; 1424 uint64_t total_rx_dropped; 1425 uint64_t total_tx_dropped; 1426 uint64_t total_rx_nombuf; 1427 uint64_t tx_dropped; 1428 uint64_t rx_bad_ip_csum; 1429 uint64_t rx_bad_l4_csum; 1430 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1431 uint64_t fwd_cycles; 1432 #endif 1433 1434 static const char *acc_stats_border = "+++++++++++++++"; 1435 1436 if (test_done) { 1437 printf("Packet forwarding not started\n"); 1438 return; 1439 } 1440 printf("Telling cores to stop..."); 1441 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1442 fwd_lcores[lc_id]->stopped = 1; 1443 printf("\nWaiting for lcores to finish...\n"); 1444 rte_eal_mp_wait_lcore(); 1445 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1446 if (port_fwd_end != NULL) { 1447 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1448 pt_id = fwd_ports_ids[i]; 1449 (*port_fwd_end)(pt_id); 1450 } 1451 } 1452 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1453 fwd_cycles = 0; 1454 #endif 1455 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1456 if (cur_fwd_config.nb_fwd_streams > 1457 cur_fwd_config.nb_fwd_ports) { 1458 fwd_stream_stats_display(sm_id); 1459 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1460 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1461 } else { 1462 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1463 fwd_streams[sm_id]; 1464 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1465 fwd_streams[sm_id]; 1466 } 1467 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1468 tx_dropped = (uint64_t) (tx_dropped + 1469 fwd_streams[sm_id]->fwd_dropped); 1470 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1471 1472 rx_bad_ip_csum = 1473 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1474 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1475 fwd_streams[sm_id]->rx_bad_ip_csum); 1476 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1477 rx_bad_ip_csum; 1478 1479 rx_bad_l4_csum = 1480 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1481 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1482 fwd_streams[sm_id]->rx_bad_l4_csum); 1483 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1484 rx_bad_l4_csum; 1485 1486 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1487 fwd_cycles = (uint64_t) (fwd_cycles + 1488 fwd_streams[sm_id]->core_cycles); 1489 #endif 1490 } 1491 total_recv = 0; 1492 total_xmit = 0; 1493 total_rx_dropped = 0; 1494 total_tx_dropped = 0; 1495 total_rx_nombuf = 0; 1496 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1497 pt_id = fwd_ports_ids[i]; 1498 1499 port = &ports[pt_id]; 1500 rte_eth_stats_get(pt_id, &stats); 1501 stats.ipackets -= port->stats.ipackets; 1502 port->stats.ipackets = 0; 1503 stats.opackets -= port->stats.opackets; 1504 port->stats.opackets = 0; 1505 stats.ibytes -= port->stats.ibytes; 1506 port->stats.ibytes = 0; 1507 stats.obytes -= port->stats.obytes; 1508 port->stats.obytes = 0; 1509 stats.imissed -= port->stats.imissed; 1510 port->stats.imissed = 0; 1511 stats.oerrors -= port->stats.oerrors; 1512 port->stats.oerrors = 0; 1513 stats.rx_nombuf -= port->stats.rx_nombuf; 1514 port->stats.rx_nombuf = 0; 1515 1516 total_recv += stats.ipackets; 1517 total_xmit += stats.opackets; 1518 total_rx_dropped += stats.imissed; 1519 total_tx_dropped += port->tx_dropped; 1520 total_rx_nombuf += stats.rx_nombuf; 1521 1522 fwd_port_stats_display(pt_id, &stats); 1523 } 1524 1525 printf("\n %s Accumulated forward statistics for all ports" 1526 "%s\n", 1527 acc_stats_border, acc_stats_border); 1528 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1529 "%-"PRIu64"\n" 1530 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1531 "%-"PRIu64"\n", 1532 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1533 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1534 if (total_rx_nombuf > 0) 1535 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1536 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1537 "%s\n", 1538 acc_stats_border, acc_stats_border); 1539 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1540 if (total_recv > 0) 1541 printf("\n CPU cycles/packet=%u (total cycles=" 1542 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1543 (unsigned int)(fwd_cycles / total_recv), 1544 fwd_cycles, total_recv); 1545 #endif 1546 printf("\nDone.\n"); 1547 test_done = 1; 1548 } 1549 1550 void 1551 dev_set_link_up(portid_t pid) 1552 { 1553 if (rte_eth_dev_set_link_up(pid) < 0) 1554 printf("\nSet link up fail.\n"); 1555 } 1556 1557 void 1558 dev_set_link_down(portid_t pid) 1559 { 1560 if (rte_eth_dev_set_link_down(pid) < 0) 1561 printf("\nSet link down fail.\n"); 1562 } 1563 1564 static int 1565 all_ports_started(void) 1566 { 1567 portid_t pi; 1568 struct rte_port *port; 1569 1570 RTE_ETH_FOREACH_DEV(pi) { 1571 port = &ports[pi]; 1572 /* Check if there is a port which is not started */ 1573 if ((port->port_status != RTE_PORT_STARTED) && 1574 (port->slave_flag == 0)) 1575 return 0; 1576 } 1577 1578 /* No port is not started */ 1579 return 1; 1580 } 1581 1582 int 1583 port_is_stopped(portid_t port_id) 1584 { 1585 struct rte_port *port = &ports[port_id]; 1586 1587 if ((port->port_status != RTE_PORT_STOPPED) && 1588 (port->slave_flag == 0)) 1589 return 0; 1590 return 1; 1591 } 1592 1593 int 1594 all_ports_stopped(void) 1595 { 1596 portid_t pi; 1597 1598 RTE_ETH_FOREACH_DEV(pi) { 1599 if (!port_is_stopped(pi)) 1600 return 0; 1601 } 1602 1603 return 1; 1604 } 1605 1606 int 1607 port_is_started(portid_t port_id) 1608 { 1609 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1610 return 0; 1611 1612 if (ports[port_id].port_status != RTE_PORT_STARTED) 1613 return 0; 1614 1615 return 1; 1616 } 1617 1618 static int 1619 port_is_closed(portid_t port_id) 1620 { 1621 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1622 return 0; 1623 1624 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1625 return 0; 1626 1627 return 1; 1628 } 1629 1630 int 1631 start_port(portid_t pid) 1632 { 1633 int diag, need_check_link_status = -1; 1634 portid_t pi; 1635 queueid_t qi; 1636 struct rte_port *port; 1637 struct ether_addr mac_addr; 1638 enum rte_eth_event_type event_type; 1639 1640 if (port_id_is_invalid(pid, ENABLED_WARN)) 1641 return 0; 1642 1643 if(dcb_config) 1644 dcb_test = 1; 1645 RTE_ETH_FOREACH_DEV(pi) { 1646 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1647 continue; 1648 1649 need_check_link_status = 0; 1650 port = &ports[pi]; 1651 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1652 RTE_PORT_HANDLING) == 0) { 1653 printf("Port %d is now not stopped\n", pi); 1654 continue; 1655 } 1656 1657 if (port->need_reconfig > 0) { 1658 port->need_reconfig = 0; 1659 1660 if (flow_isolate_all) { 1661 int ret = port_flow_isolate(pi, 1); 1662 if (ret) { 1663 printf("Failed to apply isolated" 1664 " mode on port %d\n", pi); 1665 return -1; 1666 } 1667 } 1668 1669 printf("Configuring Port %d (socket %u)\n", pi, 1670 port->socket_id); 1671 /* configure port */ 1672 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1673 &(port->dev_conf)); 1674 if (diag != 0) { 1675 if (rte_atomic16_cmpset(&(port->port_status), 1676 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1677 printf("Port %d can not be set back " 1678 "to stopped\n", pi); 1679 printf("Fail to configure port %d\n", pi); 1680 /* try to reconfigure port next time */ 1681 port->need_reconfig = 1; 1682 return -1; 1683 } 1684 } 1685 if (port->need_reconfig_queues > 0) { 1686 port->need_reconfig_queues = 0; 1687 /* setup tx queues */ 1688 for (qi = 0; qi < nb_txq; qi++) { 1689 if ((numa_support) && 1690 (txring_numa[pi] != NUMA_NO_CONFIG)) 1691 diag = rte_eth_tx_queue_setup(pi, qi, 1692 port->nb_tx_desc[qi], 1693 txring_numa[pi], 1694 &(port->tx_conf[qi])); 1695 else 1696 diag = rte_eth_tx_queue_setup(pi, qi, 1697 port->nb_tx_desc[qi], 1698 port->socket_id, 1699 &(port->tx_conf[qi])); 1700 1701 if (diag == 0) 1702 continue; 1703 1704 /* Fail to setup tx queue, return */ 1705 if (rte_atomic16_cmpset(&(port->port_status), 1706 RTE_PORT_HANDLING, 1707 RTE_PORT_STOPPED) == 0) 1708 printf("Port %d can not be set back " 1709 "to stopped\n", pi); 1710 printf("Fail to configure port %d tx queues\n", 1711 pi); 1712 /* try to reconfigure queues next time */ 1713 port->need_reconfig_queues = 1; 1714 return -1; 1715 } 1716 for (qi = 0; qi < nb_rxq; qi++) { 1717 /* setup rx queues */ 1718 if ((numa_support) && 1719 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1720 struct rte_mempool * mp = 1721 mbuf_pool_find(rxring_numa[pi]); 1722 if (mp == NULL) { 1723 printf("Failed to setup RX queue:" 1724 "No mempool allocation" 1725 " on the socket %d\n", 1726 rxring_numa[pi]); 1727 return -1; 1728 } 1729 1730 diag = rte_eth_rx_queue_setup(pi, qi, 1731 port->nb_rx_desc[qi], 1732 rxring_numa[pi], 1733 &(port->rx_conf[qi]), 1734 mp); 1735 } else { 1736 struct rte_mempool *mp = 1737 mbuf_pool_find(port->socket_id); 1738 if (mp == NULL) { 1739 printf("Failed to setup RX queue:" 1740 "No mempool allocation" 1741 " on the socket %d\n", 1742 port->socket_id); 1743 return -1; 1744 } 1745 diag = rte_eth_rx_queue_setup(pi, qi, 1746 port->nb_rx_desc[qi], 1747 port->socket_id, 1748 &(port->rx_conf[qi]), 1749 mp); 1750 } 1751 if (diag == 0) 1752 continue; 1753 1754 /* Fail to setup rx queue, return */ 1755 if (rte_atomic16_cmpset(&(port->port_status), 1756 RTE_PORT_HANDLING, 1757 RTE_PORT_STOPPED) == 0) 1758 printf("Port %d can not be set back " 1759 "to stopped\n", pi); 1760 printf("Fail to configure port %d rx queues\n", 1761 pi); 1762 /* try to reconfigure queues next time */ 1763 port->need_reconfig_queues = 1; 1764 return -1; 1765 } 1766 } 1767 1768 /* start port */ 1769 if (rte_eth_dev_start(pi) < 0) { 1770 printf("Fail to start port %d\n", pi); 1771 1772 /* Fail to setup rx queue, return */ 1773 if (rte_atomic16_cmpset(&(port->port_status), 1774 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1775 printf("Port %d can not be set back to " 1776 "stopped\n", pi); 1777 continue; 1778 } 1779 1780 if (rte_atomic16_cmpset(&(port->port_status), 1781 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1782 printf("Port %d can not be set into started\n", pi); 1783 1784 rte_eth_macaddr_get(pi, &mac_addr); 1785 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1786 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1787 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1788 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1789 1790 /* at least one port started, need checking link status */ 1791 need_check_link_status = 1; 1792 } 1793 1794 for (event_type = RTE_ETH_EVENT_UNKNOWN; 1795 event_type < RTE_ETH_EVENT_MAX; 1796 event_type++) { 1797 diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 1798 event_type, 1799 eth_event_callback, 1800 NULL); 1801 if (diag) { 1802 printf("Failed to setup even callback for event %d\n", 1803 event_type); 1804 return -1; 1805 } 1806 } 1807 1808 if (need_check_link_status == 1 && !no_link_check) 1809 check_all_ports_link_status(RTE_PORT_ALL); 1810 else if (need_check_link_status == 0) 1811 printf("Please stop the ports first\n"); 1812 1813 printf("Done\n"); 1814 return 0; 1815 } 1816 1817 void 1818 stop_port(portid_t pid) 1819 { 1820 portid_t pi; 1821 struct rte_port *port; 1822 int need_check_link_status = 0; 1823 1824 if (dcb_test) { 1825 dcb_test = 0; 1826 dcb_config = 0; 1827 } 1828 1829 if (port_id_is_invalid(pid, ENABLED_WARN)) 1830 return; 1831 1832 printf("Stopping ports...\n"); 1833 1834 RTE_ETH_FOREACH_DEV(pi) { 1835 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1836 continue; 1837 1838 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1839 printf("Please remove port %d from forwarding configuration.\n", pi); 1840 continue; 1841 } 1842 1843 if (port_is_bonding_slave(pi)) { 1844 printf("Please remove port %d from bonded device.\n", pi); 1845 continue; 1846 } 1847 1848 port = &ports[pi]; 1849 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1850 RTE_PORT_HANDLING) == 0) 1851 continue; 1852 1853 rte_eth_dev_stop(pi); 1854 1855 if (rte_atomic16_cmpset(&(port->port_status), 1856 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1857 printf("Port %d can not be set into stopped\n", pi); 1858 need_check_link_status = 1; 1859 } 1860 if (need_check_link_status && !no_link_check) 1861 check_all_ports_link_status(RTE_PORT_ALL); 1862 1863 printf("Done\n"); 1864 } 1865 1866 void 1867 close_port(portid_t pid) 1868 { 1869 portid_t pi; 1870 struct rte_port *port; 1871 1872 if (port_id_is_invalid(pid, ENABLED_WARN)) 1873 return; 1874 1875 printf("Closing ports...\n"); 1876 1877 RTE_ETH_FOREACH_DEV(pi) { 1878 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1879 continue; 1880 1881 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1882 printf("Please remove port %d from forwarding configuration.\n", pi); 1883 continue; 1884 } 1885 1886 if (port_is_bonding_slave(pi)) { 1887 printf("Please remove port %d from bonded device.\n", pi); 1888 continue; 1889 } 1890 1891 port = &ports[pi]; 1892 if (rte_atomic16_cmpset(&(port->port_status), 1893 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1894 printf("Port %d is already closed\n", pi); 1895 continue; 1896 } 1897 1898 if (rte_atomic16_cmpset(&(port->port_status), 1899 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1900 printf("Port %d is now not stopped\n", pi); 1901 continue; 1902 } 1903 1904 if (port->flow_list) 1905 port_flow_flush(pi); 1906 rte_eth_dev_close(pi); 1907 1908 if (rte_atomic16_cmpset(&(port->port_status), 1909 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1910 printf("Port %d cannot be set to closed\n", pi); 1911 } 1912 1913 printf("Done\n"); 1914 } 1915 1916 void 1917 reset_port(portid_t pid) 1918 { 1919 int diag; 1920 portid_t pi; 1921 struct rte_port *port; 1922 1923 if (port_id_is_invalid(pid, ENABLED_WARN)) 1924 return; 1925 1926 printf("Resetting ports...\n"); 1927 1928 RTE_ETH_FOREACH_DEV(pi) { 1929 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1930 continue; 1931 1932 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1933 printf("Please remove port %d from forwarding " 1934 "configuration.\n", pi); 1935 continue; 1936 } 1937 1938 if (port_is_bonding_slave(pi)) { 1939 printf("Please remove port %d from bonded device.\n", 1940 pi); 1941 continue; 1942 } 1943 1944 diag = rte_eth_dev_reset(pi); 1945 if (diag == 0) { 1946 port = &ports[pi]; 1947 port->need_reconfig = 1; 1948 port->need_reconfig_queues = 1; 1949 } else { 1950 printf("Failed to reset port %d. diag=%d\n", pi, diag); 1951 } 1952 } 1953 1954 printf("Done\n"); 1955 } 1956 1957 static int 1958 eth_dev_event_callback_register(void) 1959 { 1960 int ret; 1961 1962 /* register the device event callback */ 1963 ret = rte_dev_event_callback_register(NULL, 1964 eth_dev_event_callback, NULL); 1965 if (ret) { 1966 printf("Failed to register device event callback\n"); 1967 return -1; 1968 } 1969 1970 return 0; 1971 } 1972 1973 1974 static int 1975 eth_dev_event_callback_unregister(void) 1976 { 1977 int ret; 1978 1979 /* unregister the device event callback */ 1980 ret = rte_dev_event_callback_unregister(NULL, 1981 eth_dev_event_callback, NULL); 1982 if (ret < 0) { 1983 printf("Failed to unregister device event callback\n"); 1984 return -1; 1985 } 1986 1987 return 0; 1988 } 1989 1990 void 1991 attach_port(char *identifier) 1992 { 1993 portid_t pi = 0; 1994 unsigned int socket_id; 1995 1996 printf("Attaching a new port...\n"); 1997 1998 if (identifier == NULL) { 1999 printf("Invalid parameters are specified\n"); 2000 return; 2001 } 2002 2003 if (rte_eth_dev_attach(identifier, &pi)) 2004 return; 2005 2006 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 2007 /* if socket_id is invalid, set to 0 */ 2008 if (check_socket_id(socket_id) < 0) 2009 socket_id = 0; 2010 reconfig(pi, socket_id); 2011 rte_eth_promiscuous_enable(pi); 2012 2013 ports_ids[nb_ports] = pi; 2014 nb_ports = rte_eth_dev_count_avail(); 2015 2016 ports[pi].port_status = RTE_PORT_STOPPED; 2017 2018 update_fwd_ports(pi); 2019 2020 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2021 printf("Done\n"); 2022 } 2023 2024 void 2025 detach_port(portid_t port_id) 2026 { 2027 char name[RTE_ETH_NAME_MAX_LEN]; 2028 uint16_t i; 2029 2030 printf("Detaching a port...\n"); 2031 2032 if (!port_is_closed(port_id)) { 2033 printf("Please close port first\n"); 2034 return; 2035 } 2036 2037 if (ports[port_id].flow_list) 2038 port_flow_flush(port_id); 2039 2040 if (rte_eth_dev_detach(port_id, name)) { 2041 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); 2042 return; 2043 } 2044 2045 for (i = 0; i < nb_ports; i++) { 2046 if (ports_ids[i] == port_id) { 2047 ports_ids[i] = ports_ids[nb_ports-1]; 2048 ports_ids[nb_ports-1] = 0; 2049 break; 2050 } 2051 } 2052 nb_ports = rte_eth_dev_count_avail(); 2053 2054 update_fwd_ports(RTE_MAX_ETHPORTS); 2055 2056 printf("Port %u is detached. Now total ports is %d\n", 2057 port_id, nb_ports); 2058 printf("Done\n"); 2059 return; 2060 } 2061 2062 void 2063 pmd_test_exit(void) 2064 { 2065 struct rte_device *device; 2066 portid_t pt_id; 2067 int ret; 2068 2069 if (test_done == 0) 2070 stop_packet_forwarding(); 2071 2072 if (ports != NULL) { 2073 no_link_check = 1; 2074 RTE_ETH_FOREACH_DEV(pt_id) { 2075 printf("\nShutting down port %d...\n", pt_id); 2076 fflush(stdout); 2077 stop_port(pt_id); 2078 close_port(pt_id); 2079 2080 /* 2081 * This is a workaround to fix a virtio-user issue that 2082 * requires to call clean-up routine to remove existing 2083 * socket. 2084 * This workaround valid only for testpmd, needs a fix 2085 * valid for all applications. 2086 * TODO: Implement proper resource cleanup 2087 */ 2088 device = rte_eth_devices[pt_id].device; 2089 if (device && !strcmp(device->driver->name, "net_virtio_user")) 2090 detach_port(pt_id); 2091 } 2092 } 2093 2094 if (hot_plug) { 2095 ret = rte_dev_event_monitor_stop(); 2096 if (ret) 2097 RTE_LOG(ERR, EAL, 2098 "fail to stop device event monitor."); 2099 2100 ret = eth_dev_event_callback_unregister(); 2101 if (ret) 2102 RTE_LOG(ERR, EAL, 2103 "fail to unregister all event callbacks."); 2104 } 2105 2106 printf("\nBye...\n"); 2107 } 2108 2109 typedef void (*cmd_func_t)(void); 2110 struct pmd_test_command { 2111 const char *cmd_name; 2112 cmd_func_t cmd_func; 2113 }; 2114 2115 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 2116 2117 /* Check the link status of all ports in up to 9s, and print them finally */ 2118 static void 2119 check_all_ports_link_status(uint32_t port_mask) 2120 { 2121 #define CHECK_INTERVAL 100 /* 100ms */ 2122 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2123 portid_t portid; 2124 uint8_t count, all_ports_up, print_flag = 0; 2125 struct rte_eth_link link; 2126 2127 printf("Checking link statuses...\n"); 2128 fflush(stdout); 2129 for (count = 0; count <= MAX_CHECK_TIME; count++) { 2130 all_ports_up = 1; 2131 RTE_ETH_FOREACH_DEV(portid) { 2132 if ((port_mask & (1 << portid)) == 0) 2133 continue; 2134 memset(&link, 0, sizeof(link)); 2135 rte_eth_link_get_nowait(portid, &link); 2136 /* print link status if flag set */ 2137 if (print_flag == 1) { 2138 if (link.link_status) 2139 printf( 2140 "Port%d Link Up. speed %u Mbps- %s\n", 2141 portid, link.link_speed, 2142 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 2143 ("full-duplex") : ("half-duplex\n")); 2144 else 2145 printf("Port %d Link Down\n", portid); 2146 continue; 2147 } 2148 /* clear all_ports_up flag if any link down */ 2149 if (link.link_status == ETH_LINK_DOWN) { 2150 all_ports_up = 0; 2151 break; 2152 } 2153 } 2154 /* after finally printing all link status, get out */ 2155 if (print_flag == 1) 2156 break; 2157 2158 if (all_ports_up == 0) { 2159 fflush(stdout); 2160 rte_delay_ms(CHECK_INTERVAL); 2161 } 2162 2163 /* set the print_flag if all ports up or timeout */ 2164 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2165 print_flag = 1; 2166 } 2167 2168 if (lsc_interrupt) 2169 break; 2170 } 2171 } 2172 2173 static void 2174 rmv_event_callback(void *arg) 2175 { 2176 int need_to_start = 0; 2177 int org_no_link_check = no_link_check; 2178 portid_t port_id = (intptr_t)arg; 2179 2180 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2181 2182 if (!test_done && port_is_forwarding(port_id)) { 2183 need_to_start = 1; 2184 stop_packet_forwarding(); 2185 } 2186 no_link_check = 1; 2187 stop_port(port_id); 2188 no_link_check = org_no_link_check; 2189 close_port(port_id); 2190 detach_port(port_id); 2191 if (need_to_start) 2192 start_packet_forwarding(0); 2193 } 2194 2195 /* This function is used by the interrupt thread */ 2196 static int 2197 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2198 void *ret_param) 2199 { 2200 static const char * const event_desc[] = { 2201 [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 2202 [RTE_ETH_EVENT_INTR_LSC] = "LSC", 2203 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 2204 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 2205 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2206 [RTE_ETH_EVENT_IPSEC] = "IPsec", 2207 [RTE_ETH_EVENT_MACSEC] = "MACsec", 2208 [RTE_ETH_EVENT_INTR_RMV] = "device removal", 2209 [RTE_ETH_EVENT_NEW] = "device probed", 2210 [RTE_ETH_EVENT_DESTROY] = "device released", 2211 [RTE_ETH_EVENT_MAX] = NULL, 2212 }; 2213 2214 RTE_SET_USED(param); 2215 RTE_SET_USED(ret_param); 2216 2217 if (type >= RTE_ETH_EVENT_MAX) { 2218 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 2219 port_id, __func__, type); 2220 fflush(stderr); 2221 } else if (event_print_mask & (UINT32_C(1) << type)) { 2222 printf("\nPort %" PRIu8 ": %s event\n", port_id, 2223 event_desc[type]); 2224 fflush(stdout); 2225 } 2226 2227 if (port_id_is_invalid(port_id, DISABLED_WARN)) 2228 return 0; 2229 2230 switch (type) { 2231 case RTE_ETH_EVENT_INTR_RMV: 2232 if (rte_eal_alarm_set(100000, 2233 rmv_event_callback, (void *)(intptr_t)port_id)) 2234 fprintf(stderr, "Could not set up deferred device removal\n"); 2235 break; 2236 default: 2237 break; 2238 } 2239 return 0; 2240 } 2241 2242 /* This function is used by the interrupt thread */ 2243 static void 2244 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type, 2245 __rte_unused void *arg) 2246 { 2247 if (type >= RTE_DEV_EVENT_MAX) { 2248 fprintf(stderr, "%s called upon invalid event %d\n", 2249 __func__, type); 2250 fflush(stderr); 2251 } 2252 2253 switch (type) { 2254 case RTE_DEV_EVENT_REMOVE: 2255 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2256 device_name); 2257 /* TODO: After finish failure handle, begin to stop 2258 * packet forward, stop port, close port, detach port. 2259 */ 2260 break; 2261 case RTE_DEV_EVENT_ADD: 2262 RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 2263 device_name); 2264 /* TODO: After finish kernel driver binding, 2265 * begin to attach port. 2266 */ 2267 break; 2268 default: 2269 break; 2270 } 2271 } 2272 2273 static int 2274 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2275 { 2276 uint16_t i; 2277 int diag; 2278 uint8_t mapping_found = 0; 2279 2280 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2281 if ((tx_queue_stats_mappings[i].port_id == port_id) && 2282 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2283 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2284 tx_queue_stats_mappings[i].queue_id, 2285 tx_queue_stats_mappings[i].stats_counter_id); 2286 if (diag != 0) 2287 return diag; 2288 mapping_found = 1; 2289 } 2290 } 2291 if (mapping_found) 2292 port->tx_queue_stats_mapping_enabled = 1; 2293 return 0; 2294 } 2295 2296 static int 2297 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2298 { 2299 uint16_t i; 2300 int diag; 2301 uint8_t mapping_found = 0; 2302 2303 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2304 if ((rx_queue_stats_mappings[i].port_id == port_id) && 2305 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2306 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2307 rx_queue_stats_mappings[i].queue_id, 2308 rx_queue_stats_mappings[i].stats_counter_id); 2309 if (diag != 0) 2310 return diag; 2311 mapping_found = 1; 2312 } 2313 } 2314 if (mapping_found) 2315 port->rx_queue_stats_mapping_enabled = 1; 2316 return 0; 2317 } 2318 2319 static void 2320 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2321 { 2322 int diag = 0; 2323 2324 diag = set_tx_queue_stats_mapping_registers(pi, port); 2325 if (diag != 0) { 2326 if (diag == -ENOTSUP) { 2327 port->tx_queue_stats_mapping_enabled = 0; 2328 printf("TX queue stats mapping not supported port id=%d\n", pi); 2329 } 2330 else 2331 rte_exit(EXIT_FAILURE, 2332 "set_tx_queue_stats_mapping_registers " 2333 "failed for port id=%d diag=%d\n", 2334 pi, diag); 2335 } 2336 2337 diag = set_rx_queue_stats_mapping_registers(pi, port); 2338 if (diag != 0) { 2339 if (diag == -ENOTSUP) { 2340 port->rx_queue_stats_mapping_enabled = 0; 2341 printf("RX queue stats mapping not supported port id=%d\n", pi); 2342 } 2343 else 2344 rte_exit(EXIT_FAILURE, 2345 "set_rx_queue_stats_mapping_registers " 2346 "failed for port id=%d diag=%d\n", 2347 pi, diag); 2348 } 2349 } 2350 2351 static void 2352 rxtx_port_config(struct rte_port *port) 2353 { 2354 uint16_t qid; 2355 2356 for (qid = 0; qid < nb_rxq; qid++) { 2357 port->rx_conf[qid] = port->dev_info.default_rxconf; 2358 2359 /* Check if any Rx parameters have been passed */ 2360 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2361 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 2362 2363 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2364 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 2365 2366 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2367 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 2368 2369 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2370 port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 2371 2372 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2373 port->rx_conf[qid].rx_drop_en = rx_drop_en; 2374 2375 port->nb_rx_desc[qid] = nb_rxd; 2376 } 2377 2378 for (qid = 0; qid < nb_txq; qid++) { 2379 port->tx_conf[qid] = port->dev_info.default_txconf; 2380 2381 /* Check if any Tx parameters have been passed */ 2382 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2383 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2384 2385 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2386 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2387 2388 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2389 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2390 2391 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2392 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2393 2394 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2395 port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2396 2397 port->nb_tx_desc[qid] = nb_txd; 2398 } 2399 } 2400 2401 void 2402 init_port_config(void) 2403 { 2404 portid_t pid; 2405 struct rte_port *port; 2406 2407 RTE_ETH_FOREACH_DEV(pid) { 2408 port = &ports[pid]; 2409 port->dev_conf.fdir_conf = fdir_conf; 2410 rte_eth_dev_info_get(pid, &port->dev_info); 2411 if (nb_rxq > 1) { 2412 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2413 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 2414 rss_hf & port->dev_info.flow_type_rss_offloads; 2415 } else { 2416 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2417 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2418 } 2419 2420 if (port->dcb_flag == 0) { 2421 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 2422 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 2423 else 2424 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 2425 } 2426 2427 rxtx_port_config(port); 2428 2429 rte_eth_macaddr_get(pid, &port->eth_addr); 2430 2431 map_port_queue_stats_mapping_registers(pid, port); 2432 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2433 rte_pmd_ixgbe_bypass_init(pid); 2434 #endif 2435 2436 if (lsc_interrupt && 2437 (rte_eth_devices[pid].data->dev_flags & 2438 RTE_ETH_DEV_INTR_LSC)) 2439 port->dev_conf.intr_conf.lsc = 1; 2440 if (rmv_interrupt && 2441 (rte_eth_devices[pid].data->dev_flags & 2442 RTE_ETH_DEV_INTR_RMV)) 2443 port->dev_conf.intr_conf.rmv = 1; 2444 } 2445 } 2446 2447 void set_port_slave_flag(portid_t slave_pid) 2448 { 2449 struct rte_port *port; 2450 2451 port = &ports[slave_pid]; 2452 port->slave_flag = 1; 2453 } 2454 2455 void clear_port_slave_flag(portid_t slave_pid) 2456 { 2457 struct rte_port *port; 2458 2459 port = &ports[slave_pid]; 2460 port->slave_flag = 0; 2461 } 2462 2463 uint8_t port_is_bonding_slave(portid_t slave_pid) 2464 { 2465 struct rte_port *port; 2466 2467 port = &ports[slave_pid]; 2468 if ((rte_eth_devices[slave_pid].data->dev_flags & 2469 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2470 return 1; 2471 return 0; 2472 } 2473 2474 const uint16_t vlan_tags[] = { 2475 0, 1, 2, 3, 4, 5, 6, 7, 2476 8, 9, 10, 11, 12, 13, 14, 15, 2477 16, 17, 18, 19, 20, 21, 22, 23, 2478 24, 25, 26, 27, 28, 29, 30, 31 2479 }; 2480 2481 static int 2482 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 2483 enum dcb_mode_enable dcb_mode, 2484 enum rte_eth_nb_tcs num_tcs, 2485 uint8_t pfc_en) 2486 { 2487 uint8_t i; 2488 int32_t rc; 2489 struct rte_eth_rss_conf rss_conf; 2490 2491 /* 2492 * Builds up the correct configuration for dcb+vt based on the vlan tags array 2493 * given above, and the number of traffic classes available for use. 2494 */ 2495 if (dcb_mode == DCB_VT_ENABLED) { 2496 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 2497 ð_conf->rx_adv_conf.vmdq_dcb_conf; 2498 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 2499 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2500 2501 /* VMDQ+DCB RX and TX configurations */ 2502 vmdq_rx_conf->enable_default_pool = 0; 2503 vmdq_rx_conf->default_pool = 0; 2504 vmdq_rx_conf->nb_queue_pools = 2505 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2506 vmdq_tx_conf->nb_queue_pools = 2507 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2508 2509 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 2510 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 2511 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 2512 vmdq_rx_conf->pool_map[i].pools = 2513 1 << (i % vmdq_rx_conf->nb_queue_pools); 2514 } 2515 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2516 vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2517 vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2518 } 2519 2520 /* set DCB mode of RX and TX of multiple queues */ 2521 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 2522 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2523 } else { 2524 struct rte_eth_dcb_rx_conf *rx_conf = 2525 ð_conf->rx_adv_conf.dcb_rx_conf; 2526 struct rte_eth_dcb_tx_conf *tx_conf = 2527 ð_conf->tx_adv_conf.dcb_tx_conf; 2528 2529 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 2530 if (rc != 0) 2531 return rc; 2532 2533 rx_conf->nb_tcs = num_tcs; 2534 tx_conf->nb_tcs = num_tcs; 2535 2536 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2537 rx_conf->dcb_tc[i] = i % num_tcs; 2538 tx_conf->dcb_tc[i] = i % num_tcs; 2539 } 2540 2541 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2542 eth_conf->rx_adv_conf.rss_conf = rss_conf; 2543 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 2544 } 2545 2546 if (pfc_en) 2547 eth_conf->dcb_capability_en = 2548 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2549 else 2550 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2551 2552 return 0; 2553 } 2554 2555 int 2556 init_port_dcb_config(portid_t pid, 2557 enum dcb_mode_enable dcb_mode, 2558 enum rte_eth_nb_tcs num_tcs, 2559 uint8_t pfc_en) 2560 { 2561 struct rte_eth_conf port_conf; 2562 struct rte_port *rte_port; 2563 int retval; 2564 uint16_t i; 2565 2566 rte_port = &ports[pid]; 2567 2568 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2569 /* Enter DCB configuration status */ 2570 dcb_config = 1; 2571 2572 port_conf.rxmode = rte_port->dev_conf.rxmode; 2573 port_conf.txmode = rte_port->dev_conf.txmode; 2574 2575 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2576 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 2577 if (retval < 0) 2578 return retval; 2579 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2580 2581 /* re-configure the device . */ 2582 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 2583 2584 rte_eth_dev_info_get(pid, &rte_port->dev_info); 2585 2586 /* If dev_info.vmdq_pool_base is greater than 0, 2587 * the queue id of vmdq pools is started after pf queues. 2588 */ 2589 if (dcb_mode == DCB_VT_ENABLED && 2590 rte_port->dev_info.vmdq_pool_base > 0) { 2591 printf("VMDQ_DCB multi-queue mode is nonsensical" 2592 " for port %d.", pid); 2593 return -1; 2594 } 2595 2596 /* Assume the ports in testpmd have the same dcb capability 2597 * and has the same number of rxq and txq in dcb mode 2598 */ 2599 if (dcb_mode == DCB_VT_ENABLED) { 2600 if (rte_port->dev_info.max_vfs > 0) { 2601 nb_rxq = rte_port->dev_info.nb_rx_queues; 2602 nb_txq = rte_port->dev_info.nb_tx_queues; 2603 } else { 2604 nb_rxq = rte_port->dev_info.max_rx_queues; 2605 nb_txq = rte_port->dev_info.max_tx_queues; 2606 } 2607 } else { 2608 /*if vt is disabled, use all pf queues */ 2609 if (rte_port->dev_info.vmdq_pool_base == 0) { 2610 nb_rxq = rte_port->dev_info.max_rx_queues; 2611 nb_txq = rte_port->dev_info.max_tx_queues; 2612 } else { 2613 nb_rxq = (queueid_t)num_tcs; 2614 nb_txq = (queueid_t)num_tcs; 2615 2616 } 2617 } 2618 rx_free_thresh = 64; 2619 2620 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2621 2622 rxtx_port_config(rte_port); 2623 /* VLAN filter */ 2624 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2625 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2626 rx_vft_set(pid, vlan_tags[i], 1); 2627 2628 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2629 map_port_queue_stats_mapping_registers(pid, rte_port); 2630 2631 rte_port->dcb_flag = 1; 2632 2633 return 0; 2634 } 2635 2636 static void 2637 init_port(void) 2638 { 2639 /* Configuration of Ethernet ports. */ 2640 ports = rte_zmalloc("testpmd: ports", 2641 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2642 RTE_CACHE_LINE_SIZE); 2643 if (ports == NULL) { 2644 rte_exit(EXIT_FAILURE, 2645 "rte_zmalloc(%d struct rte_port) failed\n", 2646 RTE_MAX_ETHPORTS); 2647 } 2648 } 2649 2650 static void 2651 force_quit(void) 2652 { 2653 pmd_test_exit(); 2654 prompt_exit(); 2655 } 2656 2657 static void 2658 print_stats(void) 2659 { 2660 uint8_t i; 2661 const char clr[] = { 27, '[', '2', 'J', '\0' }; 2662 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2663 2664 /* Clear screen and move to top left */ 2665 printf("%s%s", clr, top_left); 2666 2667 printf("\nPort statistics ===================================="); 2668 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2669 nic_stats_display(fwd_ports_ids[i]); 2670 } 2671 2672 static void 2673 signal_handler(int signum) 2674 { 2675 if (signum == SIGINT || signum == SIGTERM) { 2676 printf("\nSignal %d received, preparing to exit...\n", 2677 signum); 2678 #ifdef RTE_LIBRTE_PDUMP 2679 /* uninitialize packet capture framework */ 2680 rte_pdump_uninit(); 2681 #endif 2682 #ifdef RTE_LIBRTE_LATENCY_STATS 2683 rte_latencystats_uninit(); 2684 #endif 2685 force_quit(); 2686 /* Set flag to indicate the force termination. */ 2687 f_quit = 1; 2688 /* exit with the expected status */ 2689 signal(signum, SIG_DFL); 2690 kill(getpid(), signum); 2691 } 2692 } 2693 2694 int 2695 main(int argc, char** argv) 2696 { 2697 int diag; 2698 portid_t port_id; 2699 uint16_t count; 2700 int ret; 2701 2702 signal(SIGINT, signal_handler); 2703 signal(SIGTERM, signal_handler); 2704 2705 diag = rte_eal_init(argc, argv); 2706 if (diag < 0) 2707 rte_panic("Cannot init EAL\n"); 2708 2709 testpmd_logtype = rte_log_register("testpmd"); 2710 if (testpmd_logtype < 0) 2711 rte_panic("Cannot register log type"); 2712 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 2713 2714 #ifdef RTE_LIBRTE_PDUMP 2715 /* initialize packet capture framework */ 2716 rte_pdump_init(NULL); 2717 #endif 2718 2719 count = 0; 2720 RTE_ETH_FOREACH_DEV(port_id) { 2721 ports_ids[count] = port_id; 2722 count++; 2723 } 2724 nb_ports = (portid_t) count; 2725 if (nb_ports == 0) 2726 TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 2727 2728 /* allocate port structures, and init them */ 2729 init_port(); 2730 2731 set_def_fwd_config(); 2732 if (nb_lcores == 0) 2733 rte_panic("Empty set of forwarding logical cores - check the " 2734 "core mask supplied in the command parameters\n"); 2735 2736 /* Bitrate/latency stats disabled by default */ 2737 #ifdef RTE_LIBRTE_BITRATE 2738 bitrate_enabled = 0; 2739 #endif 2740 #ifdef RTE_LIBRTE_LATENCY_STATS 2741 latencystats_enabled = 0; 2742 #endif 2743 2744 /* on FreeBSD, mlockall() is disabled by default */ 2745 #ifdef RTE_EXEC_ENV_BSDAPP 2746 do_mlockall = 0; 2747 #else 2748 do_mlockall = 1; 2749 #endif 2750 2751 argc -= diag; 2752 argv += diag; 2753 if (argc > 1) 2754 launch_args_parse(argc, argv); 2755 2756 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 2757 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 2758 strerror(errno)); 2759 } 2760 2761 if (tx_first && interactive) 2762 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 2763 "interactive mode.\n"); 2764 2765 if (tx_first && lsc_interrupt) { 2766 printf("Warning: lsc_interrupt needs to be off when " 2767 " using tx_first. Disabling.\n"); 2768 lsc_interrupt = 0; 2769 } 2770 2771 if (!nb_rxq && !nb_txq) 2772 printf("Warning: Either rx or tx queues should be non-zero\n"); 2773 2774 if (nb_rxq > 1 && nb_rxq > nb_txq) 2775 printf("Warning: nb_rxq=%d enables RSS configuration, " 2776 "but nb_txq=%d will prevent to fully test it.\n", 2777 nb_rxq, nb_txq); 2778 2779 init_config(); 2780 2781 if (hot_plug) { 2782 /* enable hot plug monitoring */ 2783 ret = rte_dev_event_monitor_start(); 2784 if (ret) { 2785 rte_errno = EINVAL; 2786 return -1; 2787 } 2788 eth_dev_event_callback_register(); 2789 2790 } 2791 2792 if (start_port(RTE_PORT_ALL) != 0) 2793 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2794 2795 /* set all ports to promiscuous mode by default */ 2796 RTE_ETH_FOREACH_DEV(port_id) 2797 rte_eth_promiscuous_enable(port_id); 2798 2799 /* Init metrics library */ 2800 rte_metrics_init(rte_socket_id()); 2801 2802 #ifdef RTE_LIBRTE_LATENCY_STATS 2803 if (latencystats_enabled != 0) { 2804 int ret = rte_latencystats_init(1, NULL); 2805 if (ret) 2806 printf("Warning: latencystats init()" 2807 " returned error %d\n", ret); 2808 printf("Latencystats running on lcore %d\n", 2809 latencystats_lcore_id); 2810 } 2811 #endif 2812 2813 /* Setup bitrate stats */ 2814 #ifdef RTE_LIBRTE_BITRATE 2815 if (bitrate_enabled != 0) { 2816 bitrate_data = rte_stats_bitrate_create(); 2817 if (bitrate_data == NULL) 2818 rte_exit(EXIT_FAILURE, 2819 "Could not allocate bitrate data.\n"); 2820 rte_stats_bitrate_reg(bitrate_data); 2821 } 2822 #endif 2823 2824 #ifdef RTE_LIBRTE_CMDLINE 2825 if (strlen(cmdline_filename) != 0) 2826 cmdline_read_from_file(cmdline_filename); 2827 2828 if (interactive == 1) { 2829 if (auto_start) { 2830 printf("Start automatic packet forwarding\n"); 2831 start_packet_forwarding(0); 2832 } 2833 prompt(); 2834 pmd_test_exit(); 2835 } else 2836 #endif 2837 { 2838 char c; 2839 int rc; 2840 2841 f_quit = 0; 2842 2843 printf("No commandline core given, start packet forwarding\n"); 2844 start_packet_forwarding(tx_first); 2845 if (stats_period != 0) { 2846 uint64_t prev_time = 0, cur_time, diff_time = 0; 2847 uint64_t timer_period; 2848 2849 /* Convert to number of cycles */ 2850 timer_period = stats_period * rte_get_timer_hz(); 2851 2852 while (f_quit == 0) { 2853 cur_time = rte_get_timer_cycles(); 2854 diff_time += cur_time - prev_time; 2855 2856 if (diff_time >= timer_period) { 2857 print_stats(); 2858 /* Reset the timer */ 2859 diff_time = 0; 2860 } 2861 /* Sleep to avoid unnecessary checks */ 2862 prev_time = cur_time; 2863 sleep(1); 2864 } 2865 } 2866 2867 printf("Press enter to exit\n"); 2868 rc = read(0, &c, 1); 2869 pmd_test_exit(); 2870 if (rc < 0) 2871 return 1; 2872 } 2873 2874 return 0; 2875 } 2876