1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_errno.h> 53 #include <rte_byteorder.h> 54 #include <rte_log.h> 55 #include <rte_debug.h> 56 #include <rte_cycles.h> 57 #include <rte_memory.h> 58 #include <rte_memcpy.h> 59 #include <rte_memzone.h> 60 #include <rte_launch.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_mempool.h> 67 #include <rte_malloc.h> 68 #include <rte_mbuf.h> 69 #include <rte_interrupts.h> 70 #include <rte_pci.h> 71 #include <rte_ether.h> 72 #include <rte_ethdev.h> 73 #include <rte_dev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 #ifdef RTE_LIBRTE_PDUMP 79 #include <rte_pdump.h> 80 #endif 81 82 #include "testpmd.h" 83 84 uint16_t verbose_level = 0; /**< Silent by default. */ 85 86 /* use master core for command line ? */ 87 uint8_t interactive = 0; 88 uint8_t auto_start = 0; 89 90 /* 91 * NUMA support configuration. 92 * When set, the NUMA support attempts to dispatch the allocation of the 93 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 94 * probed ports among the CPU sockets 0 and 1. 95 * Otherwise, all memory is allocated from CPU socket 0. 96 */ 97 uint8_t numa_support = 0; /**< No numa support by default */ 98 99 /* 100 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 101 * not configured. 102 */ 103 uint8_t socket_num = UMA_NO_CONFIG; 104 105 /* 106 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 107 */ 108 uint8_t mp_anon = 0; 109 110 /* 111 * Record the Ethernet address of peer target ports to which packets are 112 * forwarded. 113 * Must be instanciated with the ethernet addresses of peer traffic generator 114 * ports. 115 */ 116 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 117 portid_t nb_peer_eth_addrs = 0; 118 119 /* 120 * Probed Target Environment. 121 */ 122 struct rte_port *ports; /**< For all probed ethernet ports. */ 123 portid_t nb_ports; /**< Number of probed ethernet ports. */ 124 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 125 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 126 127 /* 128 * Test Forwarding Configuration. 129 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 130 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 131 */ 132 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 133 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 134 portid_t nb_cfg_ports; /**< Number of configured ports. */ 135 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 136 137 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 138 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 139 140 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 141 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 142 143 /* 144 * Forwarding engines. 145 */ 146 struct fwd_engine * fwd_engines[] = { 147 &io_fwd_engine, 148 &mac_fwd_engine, 149 &mac_swap_engine, 150 &flow_gen_engine, 151 &rx_only_engine, 152 &tx_only_engine, 153 &csum_fwd_engine, 154 &icmp_echo_engine, 155 #ifdef RTE_LIBRTE_IEEE1588 156 &ieee1588_fwd_engine, 157 #endif 158 NULL, 159 }; 160 161 struct fwd_config cur_fwd_config; 162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 163 uint32_t retry_enabled; 164 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 165 uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 166 167 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 168 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 169 * specified on command-line. */ 170 171 /* 172 * Configuration of packet segments used by the "txonly" processing engine. 173 */ 174 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 175 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 176 TXONLY_DEF_PACKET_LEN, 177 }; 178 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 179 180 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 181 /**< Split policy for packets to TX. */ 182 183 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 184 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 185 186 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 187 uint8_t dcb_config = 0; 188 189 /* Whether the dcb is in testing status */ 190 uint8_t dcb_test = 0; 191 192 /* 193 * Configurable number of RX/TX queues. 194 */ 195 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 196 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 197 198 /* 199 * Configurable number of RX/TX ring descriptors. 200 */ 201 #define RTE_TEST_RX_DESC_DEFAULT 128 202 #define RTE_TEST_TX_DESC_DEFAULT 512 203 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 204 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 205 206 #define RTE_PMD_PARAM_UNSET -1 207 /* 208 * Configurable values of RX and TX ring threshold registers. 209 */ 210 211 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 212 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 213 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 214 215 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 216 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 217 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 218 219 /* 220 * Configurable value of RX free threshold. 221 */ 222 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 223 224 /* 225 * Configurable value of RX drop enable. 226 */ 227 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 228 229 /* 230 * Configurable value of TX free threshold. 231 */ 232 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 233 234 /* 235 * Configurable value of TX RS bit threshold. 236 */ 237 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 238 239 /* 240 * Configurable value of TX queue flags. 241 */ 242 int32_t txq_flags = RTE_PMD_PARAM_UNSET; 243 244 /* 245 * Receive Side Scaling (RSS) configuration. 246 */ 247 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 248 249 /* 250 * Port topology configuration 251 */ 252 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 253 254 /* 255 * Avoids to flush all the RX streams before starts forwarding. 256 */ 257 uint8_t no_flush_rx = 0; /* flush by default */ 258 259 /* 260 * Avoids to check link status when starting/stopping a port. 261 */ 262 uint8_t no_link_check = 0; /* check by default */ 263 264 /* 265 * NIC bypass mode configuration options. 266 */ 267 #ifdef RTE_NIC_BYPASS 268 269 /* The NIC bypass watchdog timeout. */ 270 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 271 272 #endif 273 274 /* default period is 1 second */ 275 static uint64_t timer_period = 1; 276 277 /* 278 * Ethernet device configuration. 279 */ 280 struct rte_eth_rxmode rx_mode = { 281 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 282 .split_hdr_size = 0, 283 .header_split = 0, /**< Header Split disabled. */ 284 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 285 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 286 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 287 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 288 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 289 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 290 }; 291 292 struct rte_fdir_conf fdir_conf = { 293 .mode = RTE_FDIR_MODE_NONE, 294 .pballoc = RTE_FDIR_PBALLOC_64K, 295 .status = RTE_FDIR_REPORT_STATUS, 296 .mask = { 297 .vlan_tci_mask = 0x0, 298 .ipv4_mask = { 299 .src_ip = 0xFFFFFFFF, 300 .dst_ip = 0xFFFFFFFF, 301 }, 302 .ipv6_mask = { 303 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 304 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 305 }, 306 .src_port_mask = 0xFFFF, 307 .dst_port_mask = 0xFFFF, 308 .mac_addr_byte_mask = 0xFF, 309 .tunnel_type_mask = 1, 310 .tunnel_id_mask = 0xFFFFFFFF, 311 }, 312 .drop_queue = 127, 313 }; 314 315 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 316 317 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 318 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 319 320 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 321 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 322 323 uint16_t nb_tx_queue_stats_mappings = 0; 324 uint16_t nb_rx_queue_stats_mappings = 0; 325 326 unsigned max_socket = 0; 327 328 /* Forward function declarations */ 329 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 330 static void check_all_ports_link_status(uint32_t port_mask); 331 332 /* 333 * Check if all the ports are started. 334 * If yes, return positive value. If not, return zero. 335 */ 336 static int all_ports_started(void); 337 338 /* 339 * Find next enabled port 340 */ 341 portid_t 342 find_next_port(portid_t p, struct rte_port *ports, int size) 343 { 344 if (ports == NULL) 345 rte_exit(-EINVAL, "failed to find a next port id\n"); 346 347 while ((p < size) && (ports[p].enabled == 0)) 348 p++; 349 return p; 350 } 351 352 /* 353 * Setup default configuration. 354 */ 355 static void 356 set_default_fwd_lcores_config(void) 357 { 358 unsigned int i; 359 unsigned int nb_lc; 360 unsigned int sock_num; 361 362 nb_lc = 0; 363 for (i = 0; i < RTE_MAX_LCORE; i++) { 364 sock_num = rte_lcore_to_socket_id(i) + 1; 365 if (sock_num > max_socket) { 366 if (sock_num > RTE_MAX_NUMA_NODES) 367 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES); 368 max_socket = sock_num; 369 } 370 if (!rte_lcore_is_enabled(i)) 371 continue; 372 if (i == rte_get_master_lcore()) 373 continue; 374 fwd_lcores_cpuids[nb_lc++] = i; 375 } 376 nb_lcores = (lcoreid_t) nb_lc; 377 nb_cfg_lcores = nb_lcores; 378 nb_fwd_lcores = 1; 379 } 380 381 static void 382 set_def_peer_eth_addrs(void) 383 { 384 portid_t i; 385 386 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 387 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 388 peer_eth_addrs[i].addr_bytes[5] = i; 389 } 390 } 391 392 static void 393 set_default_fwd_ports_config(void) 394 { 395 portid_t pt_id; 396 397 for (pt_id = 0; pt_id < nb_ports; pt_id++) 398 fwd_ports_ids[pt_id] = pt_id; 399 400 nb_cfg_ports = nb_ports; 401 nb_fwd_ports = nb_ports; 402 } 403 404 void 405 set_def_fwd_config(void) 406 { 407 set_default_fwd_lcores_config(); 408 set_def_peer_eth_addrs(); 409 set_default_fwd_ports_config(); 410 } 411 412 /* 413 * Configuration initialisation done once at init time. 414 */ 415 static void 416 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 417 unsigned int socket_id) 418 { 419 char pool_name[RTE_MEMPOOL_NAMESIZE]; 420 struct rte_mempool *rte_mp = NULL; 421 uint32_t mb_size; 422 423 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 424 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 425 426 RTE_LOG(INFO, USER1, 427 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 428 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 429 430 #ifdef RTE_LIBRTE_PMD_XENVIRT 431 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 432 (unsigned) mb_mempool_cache, 433 sizeof(struct rte_pktmbuf_pool_private), 434 rte_pktmbuf_pool_init, NULL, 435 rte_pktmbuf_init, NULL, 436 socket_id, 0); 437 #endif 438 439 /* if the former XEN allocation failed fall back to normal allocation */ 440 if (rte_mp == NULL) { 441 if (mp_anon != 0) { 442 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 443 mb_size, (unsigned) mb_mempool_cache, 444 sizeof(struct rte_pktmbuf_pool_private), 445 socket_id, 0); 446 447 if (rte_mempool_populate_anon(rte_mp) == 0) { 448 rte_mempool_free(rte_mp); 449 rte_mp = NULL; 450 } 451 rte_pktmbuf_pool_init(rte_mp, NULL); 452 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 453 } else { 454 /* wrapper to rte_mempool_create() */ 455 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 456 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 457 } 458 } 459 460 if (rte_mp == NULL) { 461 rte_exit(EXIT_FAILURE, 462 "Creation of mbuf pool for socket %u failed: %s\n", 463 socket_id, rte_strerror(rte_errno)); 464 } else if (verbose_level > 0) { 465 rte_mempool_dump(stdout, rte_mp); 466 } 467 } 468 469 /* 470 * Check given socket id is valid or not with NUMA mode, 471 * if valid, return 0, else return -1 472 */ 473 static int 474 check_socket_id(const unsigned int socket_id) 475 { 476 static int warning_once = 0; 477 478 if (socket_id >= max_socket) { 479 if (!warning_once && numa_support) 480 printf("Warning: NUMA should be configured manually by" 481 " using --port-numa-config and" 482 " --ring-numa-config parameters along with" 483 " --numa.\n"); 484 warning_once = 1; 485 return -1; 486 } 487 return 0; 488 } 489 490 static void 491 init_config(void) 492 { 493 portid_t pid; 494 struct rte_port *port; 495 struct rte_mempool *mbp; 496 unsigned int nb_mbuf_per_pool; 497 lcoreid_t lc_id; 498 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 499 500 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 501 /* Configuration of logical cores. */ 502 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 503 sizeof(struct fwd_lcore *) * nb_lcores, 504 RTE_CACHE_LINE_SIZE); 505 if (fwd_lcores == NULL) { 506 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 507 "failed\n", nb_lcores); 508 } 509 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 510 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 511 sizeof(struct fwd_lcore), 512 RTE_CACHE_LINE_SIZE); 513 if (fwd_lcores[lc_id] == NULL) { 514 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 515 "failed\n"); 516 } 517 fwd_lcores[lc_id]->cpuid_idx = lc_id; 518 } 519 520 /* 521 * Create pools of mbuf. 522 * If NUMA support is disabled, create a single pool of mbuf in 523 * socket 0 memory by default. 524 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 525 * 526 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 527 * nb_txd can be configured at run time. 528 */ 529 if (param_total_num_mbufs) 530 nb_mbuf_per_pool = param_total_num_mbufs; 531 else { 532 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 533 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 534 535 if (!numa_support) 536 nb_mbuf_per_pool = 537 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 538 } 539 540 if (!numa_support) { 541 if (socket_num == UMA_NO_CONFIG) 542 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 543 else 544 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 545 socket_num); 546 } 547 548 FOREACH_PORT(pid, ports) { 549 port = &ports[pid]; 550 rte_eth_dev_info_get(pid, &port->dev_info); 551 552 if (numa_support) { 553 if (port_numa[pid] != NUMA_NO_CONFIG) 554 port_per_socket[port_numa[pid]]++; 555 else { 556 uint32_t socket_id = rte_eth_dev_socket_id(pid); 557 558 /* if socket_id is invalid, set to 0 */ 559 if (check_socket_id(socket_id) < 0) 560 socket_id = 0; 561 port_per_socket[socket_id]++; 562 } 563 } 564 565 /* set flag to initialize port/queue */ 566 port->need_reconfig = 1; 567 port->need_reconfig_queues = 1; 568 } 569 570 if (numa_support) { 571 uint8_t i; 572 unsigned int nb_mbuf; 573 574 if (param_total_num_mbufs) 575 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 576 577 for (i = 0; i < max_socket; i++) { 578 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 579 if (nb_mbuf) 580 mbuf_pool_create(mbuf_data_size, 581 nb_mbuf,i); 582 } 583 } 584 init_port_config(); 585 586 /* 587 * Records which Mbuf pool to use by each logical core, if needed. 588 */ 589 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 590 mbp = mbuf_pool_find( 591 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 592 593 if (mbp == NULL) 594 mbp = mbuf_pool_find(0); 595 fwd_lcores[lc_id]->mbp = mbp; 596 } 597 598 /* Configuration of packet forwarding streams. */ 599 if (init_fwd_streams() < 0) 600 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 601 602 fwd_config_setup(); 603 } 604 605 606 void 607 reconfig(portid_t new_port_id, unsigned socket_id) 608 { 609 struct rte_port *port; 610 611 /* Reconfiguration of Ethernet ports. */ 612 port = &ports[new_port_id]; 613 rte_eth_dev_info_get(new_port_id, &port->dev_info); 614 615 /* set flag to initialize port/queue */ 616 port->need_reconfig = 1; 617 port->need_reconfig_queues = 1; 618 port->socket_id = socket_id; 619 620 init_port_config(); 621 } 622 623 624 int 625 init_fwd_streams(void) 626 { 627 portid_t pid; 628 struct rte_port *port; 629 streamid_t sm_id, nb_fwd_streams_new; 630 queueid_t q; 631 632 /* set socket id according to numa or not */ 633 FOREACH_PORT(pid, ports) { 634 port = &ports[pid]; 635 if (nb_rxq > port->dev_info.max_rx_queues) { 636 printf("Fail: nb_rxq(%d) is greater than " 637 "max_rx_queues(%d)\n", nb_rxq, 638 port->dev_info.max_rx_queues); 639 return -1; 640 } 641 if (nb_txq > port->dev_info.max_tx_queues) { 642 printf("Fail: nb_txq(%d) is greater than " 643 "max_tx_queues(%d)\n", nb_txq, 644 port->dev_info.max_tx_queues); 645 return -1; 646 } 647 if (numa_support) { 648 if (port_numa[pid] != NUMA_NO_CONFIG) 649 port->socket_id = port_numa[pid]; 650 else { 651 port->socket_id = rte_eth_dev_socket_id(pid); 652 653 /* if socket_id is invalid, set to 0 */ 654 if (check_socket_id(port->socket_id) < 0) 655 port->socket_id = 0; 656 } 657 } 658 else { 659 if (socket_num == UMA_NO_CONFIG) 660 port->socket_id = 0; 661 else 662 port->socket_id = socket_num; 663 } 664 } 665 666 q = RTE_MAX(nb_rxq, nb_txq); 667 if (q == 0) { 668 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 669 return -1; 670 } 671 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 672 if (nb_fwd_streams_new == nb_fwd_streams) 673 return 0; 674 /* clear the old */ 675 if (fwd_streams != NULL) { 676 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 677 if (fwd_streams[sm_id] == NULL) 678 continue; 679 rte_free(fwd_streams[sm_id]); 680 fwd_streams[sm_id] = NULL; 681 } 682 rte_free(fwd_streams); 683 fwd_streams = NULL; 684 } 685 686 /* init new */ 687 nb_fwd_streams = nb_fwd_streams_new; 688 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 689 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 690 if (fwd_streams == NULL) 691 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 692 "failed\n", nb_fwd_streams); 693 694 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 695 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 696 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 697 if (fwd_streams[sm_id] == NULL) 698 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 699 " failed\n"); 700 } 701 702 return 0; 703 } 704 705 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 706 static void 707 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 708 { 709 unsigned int total_burst; 710 unsigned int nb_burst; 711 unsigned int burst_stats[3]; 712 uint16_t pktnb_stats[3]; 713 uint16_t nb_pkt; 714 int burst_percent[3]; 715 716 /* 717 * First compute the total number of packet bursts and the 718 * two highest numbers of bursts of the same number of packets. 719 */ 720 total_burst = 0; 721 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 722 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 723 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 724 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 725 if (nb_burst == 0) 726 continue; 727 total_burst += nb_burst; 728 if (nb_burst > burst_stats[0]) { 729 burst_stats[1] = burst_stats[0]; 730 pktnb_stats[1] = pktnb_stats[0]; 731 burst_stats[0] = nb_burst; 732 pktnb_stats[0] = nb_pkt; 733 } 734 } 735 if (total_burst == 0) 736 return; 737 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 738 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 739 burst_percent[0], (int) pktnb_stats[0]); 740 if (burst_stats[0] == total_burst) { 741 printf("]\n"); 742 return; 743 } 744 if (burst_stats[0] + burst_stats[1] == total_burst) { 745 printf(" + %d%% of %d pkts]\n", 746 100 - burst_percent[0], pktnb_stats[1]); 747 return; 748 } 749 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 750 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 751 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 752 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 753 return; 754 } 755 printf(" + %d%% of %d pkts + %d%% of others]\n", 756 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 757 } 758 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 759 760 static void 761 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 762 { 763 struct rte_port *port; 764 uint8_t i; 765 766 static const char *fwd_stats_border = "----------------------"; 767 768 port = &ports[port_id]; 769 printf("\n %s Forward statistics for port %-2d %s\n", 770 fwd_stats_border, port_id, fwd_stats_border); 771 772 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 773 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 774 "%-"PRIu64"\n", 775 stats->ipackets, stats->imissed, 776 (uint64_t) (stats->ipackets + stats->imissed)); 777 778 if (cur_fwd_eng == &csum_fwd_engine) 779 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 780 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 781 if ((stats->ierrors + stats->rx_nombuf) > 0) { 782 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 783 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 784 } 785 786 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 787 "%-"PRIu64"\n", 788 stats->opackets, port->tx_dropped, 789 (uint64_t) (stats->opackets + port->tx_dropped)); 790 } 791 else { 792 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 793 "%14"PRIu64"\n", 794 stats->ipackets, stats->imissed, 795 (uint64_t) (stats->ipackets + stats->imissed)); 796 797 if (cur_fwd_eng == &csum_fwd_engine) 798 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 799 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 800 if ((stats->ierrors + stats->rx_nombuf) > 0) { 801 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 802 printf(" RX-nombufs: %14"PRIu64"\n", 803 stats->rx_nombuf); 804 } 805 806 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 807 "%14"PRIu64"\n", 808 stats->opackets, port->tx_dropped, 809 (uint64_t) (stats->opackets + port->tx_dropped)); 810 } 811 812 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 813 if (port->rx_stream) 814 pkt_burst_stats_display("RX", 815 &port->rx_stream->rx_burst_stats); 816 if (port->tx_stream) 817 pkt_burst_stats_display("TX", 818 &port->tx_stream->tx_burst_stats); 819 #endif 820 821 if (port->rx_queue_stats_mapping_enabled) { 822 printf("\n"); 823 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 824 printf(" Stats reg %2d RX-packets:%14"PRIu64 825 " RX-errors:%14"PRIu64 826 " RX-bytes:%14"PRIu64"\n", 827 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 828 } 829 printf("\n"); 830 } 831 if (port->tx_queue_stats_mapping_enabled) { 832 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 833 printf(" Stats reg %2d TX-packets:%14"PRIu64 834 " TX-bytes:%14"PRIu64"\n", 835 i, stats->q_opackets[i], stats->q_obytes[i]); 836 } 837 } 838 839 printf(" %s--------------------------------%s\n", 840 fwd_stats_border, fwd_stats_border); 841 } 842 843 static void 844 fwd_stream_stats_display(streamid_t stream_id) 845 { 846 struct fwd_stream *fs; 847 static const char *fwd_top_stats_border = "-------"; 848 849 fs = fwd_streams[stream_id]; 850 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 851 (fs->fwd_dropped == 0)) 852 return; 853 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 854 "TX Port=%2d/Queue=%2d %s\n", 855 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 856 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 857 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 858 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 859 860 /* if checksum mode */ 861 if (cur_fwd_eng == &csum_fwd_engine) { 862 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 863 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 864 } 865 866 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 867 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 868 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 869 #endif 870 } 871 872 static void 873 flush_fwd_rx_queues(void) 874 { 875 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 876 portid_t rxp; 877 portid_t port_id; 878 queueid_t rxq; 879 uint16_t nb_rx; 880 uint16_t i; 881 uint8_t j; 882 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 883 884 /* convert to number of cycles */ 885 timer_period *= rte_get_timer_hz(); 886 887 for (j = 0; j < 2; j++) { 888 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 889 for (rxq = 0; rxq < nb_rxq; rxq++) { 890 port_id = fwd_ports_ids[rxp]; 891 /** 892 * testpmd can stuck in the below do while loop 893 * if rte_eth_rx_burst() always returns nonzero 894 * packets. So timer is added to exit this loop 895 * after 1sec timer expiry. 896 */ 897 prev_tsc = rte_rdtsc(); 898 do { 899 nb_rx = rte_eth_rx_burst(port_id, rxq, 900 pkts_burst, MAX_PKT_BURST); 901 for (i = 0; i < nb_rx; i++) 902 rte_pktmbuf_free(pkts_burst[i]); 903 904 cur_tsc = rte_rdtsc(); 905 diff_tsc = cur_tsc - prev_tsc; 906 timer_tsc += diff_tsc; 907 } while ((nb_rx > 0) && 908 (timer_tsc < timer_period)); 909 timer_tsc = 0; 910 } 911 } 912 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 913 } 914 } 915 916 static void 917 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 918 { 919 struct fwd_stream **fsm; 920 streamid_t nb_fs; 921 streamid_t sm_id; 922 923 fsm = &fwd_streams[fc->stream_idx]; 924 nb_fs = fc->stream_nb; 925 do { 926 for (sm_id = 0; sm_id < nb_fs; sm_id++) 927 (*pkt_fwd)(fsm[sm_id]); 928 } while (! fc->stopped); 929 } 930 931 static int 932 start_pkt_forward_on_core(void *fwd_arg) 933 { 934 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 935 cur_fwd_config.fwd_eng->packet_fwd); 936 return 0; 937 } 938 939 /* 940 * Run the TXONLY packet forwarding engine to send a single burst of packets. 941 * Used to start communication flows in network loopback test configurations. 942 */ 943 static int 944 run_one_txonly_burst_on_core(void *fwd_arg) 945 { 946 struct fwd_lcore *fwd_lc; 947 struct fwd_lcore tmp_lcore; 948 949 fwd_lc = (struct fwd_lcore *) fwd_arg; 950 tmp_lcore = *fwd_lc; 951 tmp_lcore.stopped = 1; 952 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 953 return 0; 954 } 955 956 /* 957 * Launch packet forwarding: 958 * - Setup per-port forwarding context. 959 * - launch logical cores with their forwarding configuration. 960 */ 961 static void 962 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 963 { 964 port_fwd_begin_t port_fwd_begin; 965 unsigned int i; 966 unsigned int lc_id; 967 int diag; 968 969 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 970 if (port_fwd_begin != NULL) { 971 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 972 (*port_fwd_begin)(fwd_ports_ids[i]); 973 } 974 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 975 lc_id = fwd_lcores_cpuids[i]; 976 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 977 fwd_lcores[i]->stopped = 0; 978 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 979 fwd_lcores[i], lc_id); 980 if (diag != 0) 981 printf("launch lcore %u failed - diag=%d\n", 982 lc_id, diag); 983 } 984 } 985 } 986 987 /* 988 * Launch packet forwarding configuration. 989 */ 990 void 991 start_packet_forwarding(int with_tx_first) 992 { 993 port_fwd_begin_t port_fwd_begin; 994 port_fwd_end_t port_fwd_end; 995 struct rte_port *port; 996 unsigned int i; 997 portid_t pt_id; 998 streamid_t sm_id; 999 1000 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1001 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1002 1003 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1004 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1005 1006 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1007 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1008 (!nb_rxq || !nb_txq)) 1009 rte_exit(EXIT_FAILURE, 1010 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1011 cur_fwd_eng->fwd_mode_name); 1012 1013 if (all_ports_started() == 0) { 1014 printf("Not all ports were started\n"); 1015 return; 1016 } 1017 if (test_done == 0) { 1018 printf("Packet forwarding already started\n"); 1019 return; 1020 } 1021 1022 if (init_fwd_streams() < 0) { 1023 printf("Fail from init_fwd_streams()\n"); 1024 return; 1025 } 1026 1027 if(dcb_test) { 1028 for (i = 0; i < nb_fwd_ports; i++) { 1029 pt_id = fwd_ports_ids[i]; 1030 port = &ports[pt_id]; 1031 if (!port->dcb_flag) { 1032 printf("In DCB mode, all forwarding ports must " 1033 "be configured in this mode.\n"); 1034 return; 1035 } 1036 } 1037 if (nb_fwd_lcores == 1) { 1038 printf("In DCB mode,the nb forwarding cores " 1039 "should be larger than 1.\n"); 1040 return; 1041 } 1042 } 1043 test_done = 0; 1044 1045 if(!no_flush_rx) 1046 flush_fwd_rx_queues(); 1047 1048 fwd_config_setup(); 1049 pkt_fwd_config_display(&cur_fwd_config); 1050 rxtx_config_display(); 1051 1052 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1053 pt_id = fwd_ports_ids[i]; 1054 port = &ports[pt_id]; 1055 rte_eth_stats_get(pt_id, &port->stats); 1056 port->tx_dropped = 0; 1057 1058 map_port_queue_stats_mapping_registers(pt_id, port); 1059 } 1060 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1061 fwd_streams[sm_id]->rx_packets = 0; 1062 fwd_streams[sm_id]->tx_packets = 0; 1063 fwd_streams[sm_id]->fwd_dropped = 0; 1064 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1065 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1066 1067 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1068 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1069 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1070 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1071 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1072 #endif 1073 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1074 fwd_streams[sm_id]->core_cycles = 0; 1075 #endif 1076 } 1077 if (with_tx_first) { 1078 port_fwd_begin = tx_only_engine.port_fwd_begin; 1079 if (port_fwd_begin != NULL) { 1080 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1081 (*port_fwd_begin)(fwd_ports_ids[i]); 1082 } 1083 while (with_tx_first--) { 1084 launch_packet_forwarding( 1085 run_one_txonly_burst_on_core); 1086 rte_eal_mp_wait_lcore(); 1087 } 1088 port_fwd_end = tx_only_engine.port_fwd_end; 1089 if (port_fwd_end != NULL) { 1090 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1091 (*port_fwd_end)(fwd_ports_ids[i]); 1092 } 1093 } 1094 launch_packet_forwarding(start_pkt_forward_on_core); 1095 } 1096 1097 void 1098 stop_packet_forwarding(void) 1099 { 1100 struct rte_eth_stats stats; 1101 struct rte_port *port; 1102 port_fwd_end_t port_fwd_end; 1103 int i; 1104 portid_t pt_id; 1105 streamid_t sm_id; 1106 lcoreid_t lc_id; 1107 uint64_t total_recv; 1108 uint64_t total_xmit; 1109 uint64_t total_rx_dropped; 1110 uint64_t total_tx_dropped; 1111 uint64_t total_rx_nombuf; 1112 uint64_t tx_dropped; 1113 uint64_t rx_bad_ip_csum; 1114 uint64_t rx_bad_l4_csum; 1115 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1116 uint64_t fwd_cycles; 1117 #endif 1118 static const char *acc_stats_border = "+++++++++++++++"; 1119 1120 if (test_done) { 1121 printf("Packet forwarding not started\n"); 1122 return; 1123 } 1124 printf("Telling cores to stop..."); 1125 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1126 fwd_lcores[lc_id]->stopped = 1; 1127 printf("\nWaiting for lcores to finish...\n"); 1128 rte_eal_mp_wait_lcore(); 1129 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1130 if (port_fwd_end != NULL) { 1131 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1132 pt_id = fwd_ports_ids[i]; 1133 (*port_fwd_end)(pt_id); 1134 } 1135 } 1136 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1137 fwd_cycles = 0; 1138 #endif 1139 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1140 if (cur_fwd_config.nb_fwd_streams > 1141 cur_fwd_config.nb_fwd_ports) { 1142 fwd_stream_stats_display(sm_id); 1143 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1144 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1145 } else { 1146 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1147 fwd_streams[sm_id]; 1148 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1149 fwd_streams[sm_id]; 1150 } 1151 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1152 tx_dropped = (uint64_t) (tx_dropped + 1153 fwd_streams[sm_id]->fwd_dropped); 1154 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1155 1156 rx_bad_ip_csum = 1157 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1158 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1159 fwd_streams[sm_id]->rx_bad_ip_csum); 1160 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1161 rx_bad_ip_csum; 1162 1163 rx_bad_l4_csum = 1164 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1165 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1166 fwd_streams[sm_id]->rx_bad_l4_csum); 1167 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1168 rx_bad_l4_csum; 1169 1170 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1171 fwd_cycles = (uint64_t) (fwd_cycles + 1172 fwd_streams[sm_id]->core_cycles); 1173 #endif 1174 } 1175 total_recv = 0; 1176 total_xmit = 0; 1177 total_rx_dropped = 0; 1178 total_tx_dropped = 0; 1179 total_rx_nombuf = 0; 1180 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1181 pt_id = fwd_ports_ids[i]; 1182 1183 port = &ports[pt_id]; 1184 rte_eth_stats_get(pt_id, &stats); 1185 stats.ipackets -= port->stats.ipackets; 1186 port->stats.ipackets = 0; 1187 stats.opackets -= port->stats.opackets; 1188 port->stats.opackets = 0; 1189 stats.ibytes -= port->stats.ibytes; 1190 port->stats.ibytes = 0; 1191 stats.obytes -= port->stats.obytes; 1192 port->stats.obytes = 0; 1193 stats.imissed -= port->stats.imissed; 1194 port->stats.imissed = 0; 1195 stats.oerrors -= port->stats.oerrors; 1196 port->stats.oerrors = 0; 1197 stats.rx_nombuf -= port->stats.rx_nombuf; 1198 port->stats.rx_nombuf = 0; 1199 1200 total_recv += stats.ipackets; 1201 total_xmit += stats.opackets; 1202 total_rx_dropped += stats.imissed; 1203 total_tx_dropped += port->tx_dropped; 1204 total_rx_nombuf += stats.rx_nombuf; 1205 1206 fwd_port_stats_display(pt_id, &stats); 1207 } 1208 printf("\n %s Accumulated forward statistics for all ports" 1209 "%s\n", 1210 acc_stats_border, acc_stats_border); 1211 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1212 "%-"PRIu64"\n" 1213 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1214 "%-"PRIu64"\n", 1215 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1216 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1217 if (total_rx_nombuf > 0) 1218 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1219 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1220 "%s\n", 1221 acc_stats_border, acc_stats_border); 1222 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1223 if (total_recv > 0) 1224 printf("\n CPU cycles/packet=%u (total cycles=" 1225 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1226 (unsigned int)(fwd_cycles / total_recv), 1227 fwd_cycles, total_recv); 1228 #endif 1229 printf("\nDone.\n"); 1230 test_done = 1; 1231 } 1232 1233 void 1234 dev_set_link_up(portid_t pid) 1235 { 1236 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1237 printf("\nSet link up fail.\n"); 1238 } 1239 1240 void 1241 dev_set_link_down(portid_t pid) 1242 { 1243 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1244 printf("\nSet link down fail.\n"); 1245 } 1246 1247 static int 1248 all_ports_started(void) 1249 { 1250 portid_t pi; 1251 struct rte_port *port; 1252 1253 FOREACH_PORT(pi, ports) { 1254 port = &ports[pi]; 1255 /* Check if there is a port which is not started */ 1256 if ((port->port_status != RTE_PORT_STARTED) && 1257 (port->slave_flag == 0)) 1258 return 0; 1259 } 1260 1261 /* No port is not started */ 1262 return 1; 1263 } 1264 1265 int 1266 all_ports_stopped(void) 1267 { 1268 portid_t pi; 1269 struct rte_port *port; 1270 1271 FOREACH_PORT(pi, ports) { 1272 port = &ports[pi]; 1273 if ((port->port_status != RTE_PORT_STOPPED) && 1274 (port->slave_flag == 0)) 1275 return 0; 1276 } 1277 1278 return 1; 1279 } 1280 1281 int 1282 port_is_started(portid_t port_id) 1283 { 1284 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1285 return 0; 1286 1287 if (ports[port_id].port_status != RTE_PORT_STARTED) 1288 return 0; 1289 1290 return 1; 1291 } 1292 1293 static int 1294 port_is_closed(portid_t port_id) 1295 { 1296 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1297 return 0; 1298 1299 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1300 return 0; 1301 1302 return 1; 1303 } 1304 1305 int 1306 start_port(portid_t pid) 1307 { 1308 int diag, need_check_link_status = -1; 1309 portid_t pi; 1310 queueid_t qi; 1311 struct rte_port *port; 1312 struct ether_addr mac_addr; 1313 1314 if (port_id_is_invalid(pid, ENABLED_WARN)) 1315 return 0; 1316 1317 if(dcb_config) 1318 dcb_test = 1; 1319 FOREACH_PORT(pi, ports) { 1320 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1321 continue; 1322 1323 need_check_link_status = 0; 1324 port = &ports[pi]; 1325 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1326 RTE_PORT_HANDLING) == 0) { 1327 printf("Port %d is now not stopped\n", pi); 1328 continue; 1329 } 1330 1331 if (port->need_reconfig > 0) { 1332 port->need_reconfig = 0; 1333 1334 printf("Configuring Port %d (socket %u)\n", pi, 1335 port->socket_id); 1336 /* configure port */ 1337 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1338 &(port->dev_conf)); 1339 if (diag != 0) { 1340 if (rte_atomic16_cmpset(&(port->port_status), 1341 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1342 printf("Port %d can not be set back " 1343 "to stopped\n", pi); 1344 printf("Fail to configure port %d\n", pi); 1345 /* try to reconfigure port next time */ 1346 port->need_reconfig = 1; 1347 return -1; 1348 } 1349 } 1350 if (port->need_reconfig_queues > 0) { 1351 port->need_reconfig_queues = 0; 1352 /* setup tx queues */ 1353 for (qi = 0; qi < nb_txq; qi++) { 1354 if ((numa_support) && 1355 (txring_numa[pi] != NUMA_NO_CONFIG)) 1356 diag = rte_eth_tx_queue_setup(pi, qi, 1357 nb_txd,txring_numa[pi], 1358 &(port->tx_conf)); 1359 else 1360 diag = rte_eth_tx_queue_setup(pi, qi, 1361 nb_txd,port->socket_id, 1362 &(port->tx_conf)); 1363 1364 if (diag == 0) 1365 continue; 1366 1367 /* Fail to setup tx queue, return */ 1368 if (rte_atomic16_cmpset(&(port->port_status), 1369 RTE_PORT_HANDLING, 1370 RTE_PORT_STOPPED) == 0) 1371 printf("Port %d can not be set back " 1372 "to stopped\n", pi); 1373 printf("Fail to configure port %d tx queues\n", pi); 1374 /* try to reconfigure queues next time */ 1375 port->need_reconfig_queues = 1; 1376 return -1; 1377 } 1378 /* setup rx queues */ 1379 for (qi = 0; qi < nb_rxq; qi++) { 1380 if ((numa_support) && 1381 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1382 struct rte_mempool * mp = 1383 mbuf_pool_find(rxring_numa[pi]); 1384 if (mp == NULL) { 1385 printf("Failed to setup RX queue:" 1386 "No mempool allocation" 1387 " on the socket %d\n", 1388 rxring_numa[pi]); 1389 return -1; 1390 } 1391 1392 diag = rte_eth_rx_queue_setup(pi, qi, 1393 nb_rxd,rxring_numa[pi], 1394 &(port->rx_conf),mp); 1395 } else { 1396 struct rte_mempool *mp = 1397 mbuf_pool_find(port->socket_id); 1398 if (mp == NULL) { 1399 printf("Failed to setup RX queue:" 1400 "No mempool allocation" 1401 " on the socket %d\n", 1402 port->socket_id); 1403 return -1; 1404 } 1405 diag = rte_eth_rx_queue_setup(pi, qi, 1406 nb_rxd,port->socket_id, 1407 &(port->rx_conf), mp); 1408 } 1409 if (diag == 0) 1410 continue; 1411 1412 /* Fail to setup rx queue, return */ 1413 if (rte_atomic16_cmpset(&(port->port_status), 1414 RTE_PORT_HANDLING, 1415 RTE_PORT_STOPPED) == 0) 1416 printf("Port %d can not be set back " 1417 "to stopped\n", pi); 1418 printf("Fail to configure port %d rx queues\n", pi); 1419 /* try to reconfigure queues next time */ 1420 port->need_reconfig_queues = 1; 1421 return -1; 1422 } 1423 } 1424 /* start port */ 1425 if (rte_eth_dev_start(pi) < 0) { 1426 printf("Fail to start port %d\n", pi); 1427 1428 /* Fail to setup rx queue, return */ 1429 if (rte_atomic16_cmpset(&(port->port_status), 1430 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1431 printf("Port %d can not be set back to " 1432 "stopped\n", pi); 1433 continue; 1434 } 1435 1436 if (rte_atomic16_cmpset(&(port->port_status), 1437 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1438 printf("Port %d can not be set into started\n", pi); 1439 1440 rte_eth_macaddr_get(pi, &mac_addr); 1441 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1442 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1443 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1444 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1445 1446 /* at least one port started, need checking link status */ 1447 need_check_link_status = 1; 1448 } 1449 1450 if (need_check_link_status == 1 && !no_link_check) 1451 check_all_ports_link_status(RTE_PORT_ALL); 1452 else if (need_check_link_status == 0) 1453 printf("Please stop the ports first\n"); 1454 1455 printf("Done\n"); 1456 return 0; 1457 } 1458 1459 void 1460 stop_port(portid_t pid) 1461 { 1462 portid_t pi; 1463 struct rte_port *port; 1464 int need_check_link_status = 0; 1465 1466 if (dcb_test) { 1467 dcb_test = 0; 1468 dcb_config = 0; 1469 } 1470 1471 if (port_id_is_invalid(pid, ENABLED_WARN)) 1472 return; 1473 1474 printf("Stopping ports...\n"); 1475 1476 FOREACH_PORT(pi, ports) { 1477 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1478 continue; 1479 1480 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1481 printf("Please remove port %d from forwarding configuration.\n", pi); 1482 continue; 1483 } 1484 1485 if (port_is_bonding_slave(pi)) { 1486 printf("Please remove port %d from bonded device.\n", pi); 1487 continue; 1488 } 1489 1490 port = &ports[pi]; 1491 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1492 RTE_PORT_HANDLING) == 0) 1493 continue; 1494 1495 rte_eth_dev_stop(pi); 1496 1497 if (rte_atomic16_cmpset(&(port->port_status), 1498 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1499 printf("Port %d can not be set into stopped\n", pi); 1500 need_check_link_status = 1; 1501 } 1502 if (need_check_link_status && !no_link_check) 1503 check_all_ports_link_status(RTE_PORT_ALL); 1504 1505 printf("Done\n"); 1506 } 1507 1508 void 1509 close_port(portid_t pid) 1510 { 1511 portid_t pi; 1512 struct rte_port *port; 1513 1514 if (port_id_is_invalid(pid, ENABLED_WARN)) 1515 return; 1516 1517 printf("Closing ports...\n"); 1518 1519 FOREACH_PORT(pi, ports) { 1520 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1521 continue; 1522 1523 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1524 printf("Please remove port %d from forwarding configuration.\n", pi); 1525 continue; 1526 } 1527 1528 if (port_is_bonding_slave(pi)) { 1529 printf("Please remove port %d from bonded device.\n", pi); 1530 continue; 1531 } 1532 1533 port = &ports[pi]; 1534 if (rte_atomic16_cmpset(&(port->port_status), 1535 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1536 printf("Port %d is already closed\n", pi); 1537 continue; 1538 } 1539 1540 if (rte_atomic16_cmpset(&(port->port_status), 1541 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1542 printf("Port %d is now not stopped\n", pi); 1543 continue; 1544 } 1545 1546 rte_eth_dev_close(pi); 1547 1548 if (rte_atomic16_cmpset(&(port->port_status), 1549 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1550 printf("Port %d cannot be set to closed\n", pi); 1551 } 1552 1553 printf("Done\n"); 1554 } 1555 1556 void 1557 attach_port(char *identifier) 1558 { 1559 portid_t pi = 0; 1560 unsigned int socket_id; 1561 1562 printf("Attaching a new port...\n"); 1563 1564 if (identifier == NULL) { 1565 printf("Invalid parameters are specified\n"); 1566 return; 1567 } 1568 1569 if (rte_eth_dev_attach(identifier, &pi)) 1570 return; 1571 1572 ports[pi].enabled = 1; 1573 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1574 /* if socket_id is invalid, set to 0 */ 1575 if (check_socket_id(socket_id) < 0) 1576 socket_id = 0; 1577 reconfig(pi, socket_id); 1578 rte_eth_promiscuous_enable(pi); 1579 1580 nb_ports = rte_eth_dev_count(); 1581 1582 ports[pi].port_status = RTE_PORT_STOPPED; 1583 1584 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1585 printf("Done\n"); 1586 } 1587 1588 void 1589 detach_port(uint8_t port_id) 1590 { 1591 char name[RTE_ETH_NAME_MAX_LEN]; 1592 1593 printf("Detaching a port...\n"); 1594 1595 if (!port_is_closed(port_id)) { 1596 printf("Please close port first\n"); 1597 return; 1598 } 1599 1600 if (rte_eth_dev_detach(port_id, name)) 1601 return; 1602 1603 ports[port_id].enabled = 0; 1604 nb_ports = rte_eth_dev_count(); 1605 1606 printf("Port '%s' is detached. Now total ports is %d\n", 1607 name, nb_ports); 1608 printf("Done\n"); 1609 return; 1610 } 1611 1612 void 1613 pmd_test_exit(void) 1614 { 1615 portid_t pt_id; 1616 1617 if (test_done == 0) 1618 stop_packet_forwarding(); 1619 1620 if (ports != NULL) { 1621 no_link_check = 1; 1622 FOREACH_PORT(pt_id, ports) { 1623 printf("\nShutting down port %d...\n", pt_id); 1624 fflush(stdout); 1625 stop_port(pt_id); 1626 close_port(pt_id); 1627 } 1628 } 1629 printf("\nBye...\n"); 1630 } 1631 1632 typedef void (*cmd_func_t)(void); 1633 struct pmd_test_command { 1634 const char *cmd_name; 1635 cmd_func_t cmd_func; 1636 }; 1637 1638 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1639 1640 /* Check the link status of all ports in up to 9s, and print them finally */ 1641 static void 1642 check_all_ports_link_status(uint32_t port_mask) 1643 { 1644 #define CHECK_INTERVAL 100 /* 100ms */ 1645 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1646 uint8_t portid, count, all_ports_up, print_flag = 0; 1647 struct rte_eth_link link; 1648 1649 printf("Checking link statuses...\n"); 1650 fflush(stdout); 1651 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1652 all_ports_up = 1; 1653 FOREACH_PORT(portid, ports) { 1654 if ((port_mask & (1 << portid)) == 0) 1655 continue; 1656 memset(&link, 0, sizeof(link)); 1657 rte_eth_link_get_nowait(portid, &link); 1658 /* print link status if flag set */ 1659 if (print_flag == 1) { 1660 if (link.link_status) 1661 printf("Port %d Link Up - speed %u " 1662 "Mbps - %s\n", (uint8_t)portid, 1663 (unsigned)link.link_speed, 1664 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1665 ("full-duplex") : ("half-duplex\n")); 1666 else 1667 printf("Port %d Link Down\n", 1668 (uint8_t)portid); 1669 continue; 1670 } 1671 /* clear all_ports_up flag if any link down */ 1672 if (link.link_status == ETH_LINK_DOWN) { 1673 all_ports_up = 0; 1674 break; 1675 } 1676 } 1677 /* after finally printing all link status, get out */ 1678 if (print_flag == 1) 1679 break; 1680 1681 if (all_ports_up == 0) { 1682 fflush(stdout); 1683 rte_delay_ms(CHECK_INTERVAL); 1684 } 1685 1686 /* set the print_flag if all ports up or timeout */ 1687 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1688 print_flag = 1; 1689 } 1690 } 1691 } 1692 1693 static int 1694 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1695 { 1696 uint16_t i; 1697 int diag; 1698 uint8_t mapping_found = 0; 1699 1700 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1701 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1702 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1703 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1704 tx_queue_stats_mappings[i].queue_id, 1705 tx_queue_stats_mappings[i].stats_counter_id); 1706 if (diag != 0) 1707 return diag; 1708 mapping_found = 1; 1709 } 1710 } 1711 if (mapping_found) 1712 port->tx_queue_stats_mapping_enabled = 1; 1713 return 0; 1714 } 1715 1716 static int 1717 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1718 { 1719 uint16_t i; 1720 int diag; 1721 uint8_t mapping_found = 0; 1722 1723 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1724 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1725 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1726 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1727 rx_queue_stats_mappings[i].queue_id, 1728 rx_queue_stats_mappings[i].stats_counter_id); 1729 if (diag != 0) 1730 return diag; 1731 mapping_found = 1; 1732 } 1733 } 1734 if (mapping_found) 1735 port->rx_queue_stats_mapping_enabled = 1; 1736 return 0; 1737 } 1738 1739 static void 1740 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1741 { 1742 int diag = 0; 1743 1744 diag = set_tx_queue_stats_mapping_registers(pi, port); 1745 if (diag != 0) { 1746 if (diag == -ENOTSUP) { 1747 port->tx_queue_stats_mapping_enabled = 0; 1748 printf("TX queue stats mapping not supported port id=%d\n", pi); 1749 } 1750 else 1751 rte_exit(EXIT_FAILURE, 1752 "set_tx_queue_stats_mapping_registers " 1753 "failed for port id=%d diag=%d\n", 1754 pi, diag); 1755 } 1756 1757 diag = set_rx_queue_stats_mapping_registers(pi, port); 1758 if (diag != 0) { 1759 if (diag == -ENOTSUP) { 1760 port->rx_queue_stats_mapping_enabled = 0; 1761 printf("RX queue stats mapping not supported port id=%d\n", pi); 1762 } 1763 else 1764 rte_exit(EXIT_FAILURE, 1765 "set_rx_queue_stats_mapping_registers " 1766 "failed for port id=%d diag=%d\n", 1767 pi, diag); 1768 } 1769 } 1770 1771 static void 1772 rxtx_port_config(struct rte_port *port) 1773 { 1774 port->rx_conf = port->dev_info.default_rxconf; 1775 port->tx_conf = port->dev_info.default_txconf; 1776 1777 /* Check if any RX/TX parameters have been passed */ 1778 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1779 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1780 1781 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1782 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1783 1784 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1785 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1786 1787 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1788 port->rx_conf.rx_free_thresh = rx_free_thresh; 1789 1790 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1791 port->rx_conf.rx_drop_en = rx_drop_en; 1792 1793 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1794 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1795 1796 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 1797 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1798 1799 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1800 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1801 1802 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1803 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1804 1805 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1806 port->tx_conf.tx_free_thresh = tx_free_thresh; 1807 1808 if (txq_flags != RTE_PMD_PARAM_UNSET) 1809 port->tx_conf.txq_flags = txq_flags; 1810 } 1811 1812 void 1813 init_port_config(void) 1814 { 1815 portid_t pid; 1816 struct rte_port *port; 1817 1818 FOREACH_PORT(pid, ports) { 1819 port = &ports[pid]; 1820 port->dev_conf.rxmode = rx_mode; 1821 port->dev_conf.fdir_conf = fdir_conf; 1822 if (nb_rxq > 1) { 1823 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1824 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1825 } else { 1826 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1827 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1828 } 1829 1830 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1831 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1832 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1833 else 1834 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1835 } 1836 1837 if (port->dev_info.max_vfs != 0) { 1838 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1839 port->dev_conf.rxmode.mq_mode = 1840 ETH_MQ_RX_VMDQ_RSS; 1841 else 1842 port->dev_conf.rxmode.mq_mode = 1843 ETH_MQ_RX_NONE; 1844 1845 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE; 1846 } 1847 1848 rxtx_port_config(port); 1849 1850 rte_eth_macaddr_get(pid, &port->eth_addr); 1851 1852 map_port_queue_stats_mapping_registers(pid, port); 1853 #ifdef RTE_NIC_BYPASS 1854 rte_eth_dev_bypass_init(pid); 1855 #endif 1856 } 1857 } 1858 1859 void set_port_slave_flag(portid_t slave_pid) 1860 { 1861 struct rte_port *port; 1862 1863 port = &ports[slave_pid]; 1864 port->slave_flag = 1; 1865 } 1866 1867 void clear_port_slave_flag(portid_t slave_pid) 1868 { 1869 struct rte_port *port; 1870 1871 port = &ports[slave_pid]; 1872 port->slave_flag = 0; 1873 } 1874 1875 uint8_t port_is_bonding_slave(portid_t slave_pid) 1876 { 1877 struct rte_port *port; 1878 1879 port = &ports[slave_pid]; 1880 return port->slave_flag; 1881 } 1882 1883 const uint16_t vlan_tags[] = { 1884 0, 1, 2, 3, 4, 5, 6, 7, 1885 8, 9, 10, 11, 12, 13, 14, 15, 1886 16, 17, 18, 19, 20, 21, 22, 23, 1887 24, 25, 26, 27, 28, 29, 30, 31 1888 }; 1889 1890 static int 1891 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 1892 enum dcb_mode_enable dcb_mode, 1893 enum rte_eth_nb_tcs num_tcs, 1894 uint8_t pfc_en) 1895 { 1896 uint8_t i; 1897 1898 /* 1899 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1900 * given above, and the number of traffic classes available for use. 1901 */ 1902 if (dcb_mode == DCB_VT_ENABLED) { 1903 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 1904 ð_conf->rx_adv_conf.vmdq_dcb_conf; 1905 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 1906 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1907 1908 /* VMDQ+DCB RX and TX configrations */ 1909 vmdq_rx_conf->enable_default_pool = 0; 1910 vmdq_rx_conf->default_pool = 0; 1911 vmdq_rx_conf->nb_queue_pools = 1912 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1913 vmdq_tx_conf->nb_queue_pools = 1914 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1915 1916 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 1917 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 1918 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 1919 vmdq_rx_conf->pool_map[i].pools = 1920 1 << (i % vmdq_rx_conf->nb_queue_pools); 1921 } 1922 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1923 vmdq_rx_conf->dcb_tc[i] = i; 1924 vmdq_tx_conf->dcb_tc[i] = i; 1925 } 1926 1927 /* set DCB mode of RX and TX of multiple queues */ 1928 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1929 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1930 } else { 1931 struct rte_eth_dcb_rx_conf *rx_conf = 1932 ð_conf->rx_adv_conf.dcb_rx_conf; 1933 struct rte_eth_dcb_tx_conf *tx_conf = 1934 ð_conf->tx_adv_conf.dcb_tx_conf; 1935 1936 rx_conf->nb_tcs = num_tcs; 1937 tx_conf->nb_tcs = num_tcs; 1938 1939 for (i = 0; i < num_tcs; i++) { 1940 rx_conf->dcb_tc[i] = i; 1941 tx_conf->dcb_tc[i] = i; 1942 } 1943 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 1944 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 1945 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1946 } 1947 1948 if (pfc_en) 1949 eth_conf->dcb_capability_en = 1950 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 1951 else 1952 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1953 1954 return 0; 1955 } 1956 1957 int 1958 init_port_dcb_config(portid_t pid, 1959 enum dcb_mode_enable dcb_mode, 1960 enum rte_eth_nb_tcs num_tcs, 1961 uint8_t pfc_en) 1962 { 1963 struct rte_eth_conf port_conf; 1964 struct rte_eth_dev_info dev_info; 1965 struct rte_port *rte_port; 1966 int retval; 1967 uint16_t i; 1968 1969 rte_eth_dev_info_get(pid, &dev_info); 1970 1971 /* If dev_info.vmdq_pool_base is greater than 0, 1972 * the queue id of vmdq pools is started after pf queues. 1973 */ 1974 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) { 1975 printf("VMDQ_DCB multi-queue mode is nonsensical" 1976 " for port %d.", pid); 1977 return -1; 1978 } 1979 1980 /* Assume the ports in testpmd have the same dcb capability 1981 * and has the same number of rxq and txq in dcb mode 1982 */ 1983 if (dcb_mode == DCB_VT_ENABLED) { 1984 nb_rxq = dev_info.max_rx_queues; 1985 nb_txq = dev_info.max_tx_queues; 1986 } else { 1987 /*if vt is disabled, use all pf queues */ 1988 if (dev_info.vmdq_pool_base == 0) { 1989 nb_rxq = dev_info.max_rx_queues; 1990 nb_txq = dev_info.max_tx_queues; 1991 } else { 1992 nb_rxq = (queueid_t)num_tcs; 1993 nb_txq = (queueid_t)num_tcs; 1994 1995 } 1996 } 1997 rx_free_thresh = 64; 1998 1999 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2000 /* Enter DCB configuration status */ 2001 dcb_config = 1; 2002 2003 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2004 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2005 if (retval < 0) 2006 return retval; 2007 2008 rte_port = &ports[pid]; 2009 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2010 2011 rxtx_port_config(rte_port); 2012 /* VLAN filter */ 2013 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 2014 for (i = 0; i < RTE_DIM(vlan_tags); i++) 2015 rx_vft_set(pid, vlan_tags[i], 1); 2016 2017 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2018 map_port_queue_stats_mapping_registers(pid, rte_port); 2019 2020 rte_port->dcb_flag = 1; 2021 2022 return 0; 2023 } 2024 2025 static void 2026 init_port(void) 2027 { 2028 portid_t pid; 2029 2030 /* Configuration of Ethernet ports. */ 2031 ports = rte_zmalloc("testpmd: ports", 2032 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2033 RTE_CACHE_LINE_SIZE); 2034 if (ports == NULL) { 2035 rte_exit(EXIT_FAILURE, 2036 "rte_zmalloc(%d struct rte_port) failed\n", 2037 RTE_MAX_ETHPORTS); 2038 } 2039 2040 /* enabled allocated ports */ 2041 for (pid = 0; pid < nb_ports; pid++) 2042 ports[pid].enabled = 1; 2043 } 2044 2045 static void 2046 force_quit(void) 2047 { 2048 pmd_test_exit(); 2049 prompt_exit(); 2050 } 2051 2052 static void 2053 signal_handler(int signum) 2054 { 2055 if (signum == SIGINT || signum == SIGTERM) { 2056 printf("\nSignal %d received, preparing to exit...\n", 2057 signum); 2058 #ifdef RTE_LIBRTE_PDUMP 2059 /* uninitialize packet capture framework */ 2060 rte_pdump_uninit(); 2061 #endif 2062 force_quit(); 2063 /* exit with the expected status */ 2064 signal(signum, SIG_DFL); 2065 kill(getpid(), signum); 2066 } 2067 } 2068 2069 int 2070 main(int argc, char** argv) 2071 { 2072 int diag; 2073 uint8_t port_id; 2074 2075 signal(SIGINT, signal_handler); 2076 signal(SIGTERM, signal_handler); 2077 2078 diag = rte_eal_init(argc, argv); 2079 if (diag < 0) 2080 rte_panic("Cannot init EAL\n"); 2081 2082 #ifdef RTE_LIBRTE_PDUMP 2083 /* initialize packet capture framework */ 2084 rte_pdump_init(NULL); 2085 #endif 2086 2087 nb_ports = (portid_t) rte_eth_dev_count(); 2088 if (nb_ports == 0) 2089 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2090 2091 /* allocate port structures, and init them */ 2092 init_port(); 2093 2094 set_def_fwd_config(); 2095 if (nb_lcores == 0) 2096 rte_panic("Empty set of forwarding logical cores - check the " 2097 "core mask supplied in the command parameters\n"); 2098 2099 argc -= diag; 2100 argv += diag; 2101 if (argc > 1) 2102 launch_args_parse(argc, argv); 2103 2104 if (!nb_rxq && !nb_txq) 2105 printf("Warning: Either rx or tx queues should be non-zero\n"); 2106 2107 if (nb_rxq > 1 && nb_rxq > nb_txq) 2108 printf("Warning: nb_rxq=%d enables RSS configuration, " 2109 "but nb_txq=%d will prevent to fully test it.\n", 2110 nb_rxq, nb_txq); 2111 2112 init_config(); 2113 if (start_port(RTE_PORT_ALL) != 0) 2114 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2115 2116 /* set all ports to promiscuous mode by default */ 2117 FOREACH_PORT(port_id, ports) 2118 rte_eth_promiscuous_enable(port_id); 2119 2120 #ifdef RTE_LIBRTE_CMDLINE 2121 if (interactive == 1) { 2122 if (auto_start) { 2123 printf("Start automatic packet forwarding\n"); 2124 start_packet_forwarding(0); 2125 } 2126 prompt(); 2127 } else 2128 #endif 2129 { 2130 char c; 2131 int rc; 2132 2133 printf("No commandline core given, start packet forwarding\n"); 2134 start_packet_forwarding(0); 2135 printf("Press enter to exit\n"); 2136 rc = read(0, &c, 1); 2137 pmd_test_exit(); 2138 if (rc < 0) 2139 return 1; 2140 } 2141 2142 return 0; 2143 } 2144