/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
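
/*
 * These are only start-up defaults: the counts can be overridden with the
 * --rxq/--txq command-line options and changed again at run time from the
 * interactive prompt (e.g. "port config all rxq 4"); start_port() then
 * reconfigures the queues accordingly.
 */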

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */
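
/*
 * Note: test_done only gates start/stop requests issued from the command
 * line; the forwarding lcores themselves poll the per-lcore fc->stopped
 * flag, which stop_packet_forwarding() sets before waiting for them.
 */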

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
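/*
 * One mbuf pool is created per CPU socket; mbuf_poolname_build() makes the
 * pool name unique per socket. Each pool element holds an mbuf header plus
 * mbuf_seg_size bytes of buffer, which with the default mbuf_data_size is
 * typically RTE_PKTMBUF_HEADROOM plus 2048 bytes of data room.
 */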
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
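	/*
	 * Illustrative sizing, assuming the usual testpmd.h defaults
	 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, DEF_MBUF_CACHE = 250): a single lcore needs
	 * about 2048 + 250 + 2048 + 512 = 4858 mbufs per pool before the
	 * per-port multiplication below.
	 */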
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Record which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
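	/*
	 * Called for hot-plugged devices (see attach_port()): refresh the
	 * cached device info and flag the port so that start_port()
	 * reconfigures it and its queues before use.
	 */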
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams,
		RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
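	/*
	 * Note that the loop below only tracks a running maximum:
	 * burst_stats[1]/pktnb_stats[1] end up holding whatever the maximum
	 * was before the final one took over, an approximation of the
	 * second-highest burst size rather than an exact ranking.
	 */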
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

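	/*
	 * Per-queue counters are only displayed when the user mapped queues
	 * to statistics registers (--rx-queue-stats-mapping and
	 * --tx-queue-stats-mapping); ethdev exposes at most
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS such registers per port.
	 */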
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX - bad IP checksum: %-14u RX - bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop once the 1 second
				 * timeout expires.
				 */
906 */ 907 prev_tsc = rte_rdtsc(); 908 do { 909 nb_rx = rte_eth_rx_burst(port_id, rxq, 910 pkts_burst, MAX_PKT_BURST); 911 for (i = 0; i < nb_rx; i++) 912 rte_pktmbuf_free(pkts_burst[i]); 913 914 cur_tsc = rte_rdtsc(); 915 diff_tsc = cur_tsc - prev_tsc; 916 timer_tsc += diff_tsc; 917 } while ((nb_rx > 0) && 918 (timer_tsc < timer_period)); 919 timer_tsc = 0; 920 } 921 } 922 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 923 } 924 } 925 926 static void 927 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 928 { 929 struct fwd_stream **fsm; 930 streamid_t nb_fs; 931 streamid_t sm_id; 932 #ifdef RTE_LIBRTE_BITRATE 933 uint64_t tics_per_1sec; 934 uint64_t tics_datum; 935 uint64_t tics_current; 936 uint8_t idx_port, cnt_ports; 937 938 cnt_ports = rte_eth_dev_count(); 939 tics_datum = rte_rdtsc(); 940 tics_per_1sec = rte_get_timer_hz(); 941 #endif 942 fsm = &fwd_streams[fc->stream_idx]; 943 nb_fs = fc->stream_nb; 944 do { 945 for (sm_id = 0; sm_id < nb_fs; sm_id++) 946 (*pkt_fwd)(fsm[sm_id]); 947 #ifdef RTE_LIBRTE_BITRATE 948 tics_current = rte_rdtsc(); 949 if (tics_current - tics_datum >= tics_per_1sec) { 950 /* Periodic bitrate calculation */ 951 for (idx_port = 0; idx_port < cnt_ports; idx_port++) 952 rte_stats_bitrate_calc(bitrate_data, idx_port); 953 tics_datum = tics_current; 954 } 955 #endif 956 } while (! fc->stopped); 957 } 958 959 static int 960 start_pkt_forward_on_core(void *fwd_arg) 961 { 962 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 963 cur_fwd_config.fwd_eng->packet_fwd); 964 return 0; 965 } 966 967 /* 968 * Run the TXONLY packet forwarding engine to send a single burst of packets. 969 * Used to start communication flows in network loopback test configurations. 970 */ 971 static int 972 run_one_txonly_burst_on_core(void *fwd_arg) 973 { 974 struct fwd_lcore *fwd_lc; 975 struct fwd_lcore tmp_lcore; 976 977 fwd_lc = (struct fwd_lcore *) fwd_arg; 978 tmp_lcore = *fwd_lc; 979 tmp_lcore.stopped = 1; 980 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 981 return 0; 982 } 983 984 /* 985 * Launch packet forwarding: 986 * - Setup per-port forwarding context. 987 * - launch logical cores with their forwarding configuration. 988 */ 989 static void 990 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 991 { 992 port_fwd_begin_t port_fwd_begin; 993 unsigned int i; 994 unsigned int lc_id; 995 int diag; 996 997 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 998 if (port_fwd_begin != NULL) { 999 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1000 (*port_fwd_begin)(fwd_ports_ids[i]); 1001 } 1002 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1003 lc_id = fwd_lcores_cpuids[i]; 1004 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1005 fwd_lcores[i]->stopped = 0; 1006 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1007 fwd_lcores[i], lc_id); 1008 if (diag != 0) 1009 printf("launch lcore %u failed - diag=%d\n", 1010 lc_id, diag); 1011 } 1012 } 1013 } 1014 1015 /* 1016 * Launch packet forwarding configuration. 
1017 */ 1018 void 1019 start_packet_forwarding(int with_tx_first) 1020 { 1021 port_fwd_begin_t port_fwd_begin; 1022 port_fwd_end_t port_fwd_end; 1023 struct rte_port *port; 1024 unsigned int i; 1025 portid_t pt_id; 1026 streamid_t sm_id; 1027 1028 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 1029 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 1030 1031 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 1032 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 1033 1034 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 1035 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 1036 (!nb_rxq || !nb_txq)) 1037 rte_exit(EXIT_FAILURE, 1038 "Either rxq or txq are 0, cannot use %s fwd mode\n", 1039 cur_fwd_eng->fwd_mode_name); 1040 1041 if (all_ports_started() == 0) { 1042 printf("Not all ports were started\n"); 1043 return; 1044 } 1045 if (test_done == 0) { 1046 printf("Packet forwarding already started\n"); 1047 return; 1048 } 1049 1050 if (init_fwd_streams() < 0) { 1051 printf("Fail from init_fwd_streams()\n"); 1052 return; 1053 } 1054 1055 if(dcb_test) { 1056 for (i = 0; i < nb_fwd_ports; i++) { 1057 pt_id = fwd_ports_ids[i]; 1058 port = &ports[pt_id]; 1059 if (!port->dcb_flag) { 1060 printf("In DCB mode, all forwarding ports must " 1061 "be configured in this mode.\n"); 1062 return; 1063 } 1064 } 1065 if (nb_fwd_lcores == 1) { 1066 printf("In DCB mode,the nb forwarding cores " 1067 "should be larger than 1.\n"); 1068 return; 1069 } 1070 } 1071 test_done = 0; 1072 1073 if(!no_flush_rx) 1074 flush_fwd_rx_queues(); 1075 1076 fwd_config_setup(); 1077 pkt_fwd_config_display(&cur_fwd_config); 1078 rxtx_config_display(); 1079 1080 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1081 pt_id = fwd_ports_ids[i]; 1082 port = &ports[pt_id]; 1083 rte_eth_stats_get(pt_id, &port->stats); 1084 port->tx_dropped = 0; 1085 1086 map_port_queue_stats_mapping_registers(pt_id, port); 1087 } 1088 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1089 fwd_streams[sm_id]->rx_packets = 0; 1090 fwd_streams[sm_id]->tx_packets = 0; 1091 fwd_streams[sm_id]->fwd_dropped = 0; 1092 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1093 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1094 1095 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1096 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1097 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1098 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1099 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1100 #endif 1101 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1102 fwd_streams[sm_id]->core_cycles = 0; 1103 #endif 1104 } 1105 if (with_tx_first) { 1106 port_fwd_begin = tx_only_engine.port_fwd_begin; 1107 if (port_fwd_begin != NULL) { 1108 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1109 (*port_fwd_begin)(fwd_ports_ids[i]); 1110 } 1111 while (with_tx_first--) { 1112 launch_packet_forwarding( 1113 run_one_txonly_burst_on_core); 1114 rte_eal_mp_wait_lcore(); 1115 } 1116 port_fwd_end = tx_only_engine.port_fwd_end; 1117 if (port_fwd_end != NULL) { 1118 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1119 (*port_fwd_end)(fwd_ports_ids[i]); 1120 } 1121 } 1122 launch_packet_forwarding(start_pkt_forward_on_core); 1123 } 1124 1125 void 1126 stop_packet_forwarding(void) 1127 { 1128 struct rte_eth_stats stats; 1129 struct rte_port *port; 1130 port_fwd_end_t port_fwd_end; 1131 int i; 1132 portid_t pt_id; 1133 streamid_t sm_id; 1134 lcoreid_t lc_id; 1135 uint64_t total_recv; 1136 uint64_t total_xmit; 1137 uint64_t 
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
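	/* Print the totals accumulated across all forwarding ports. */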
	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nFailed to set link up.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nFailed to set link down.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No non-started port was found: all ports are started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
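		/*
		 * Queue setup is deferred until the port is (re)started so
		 * that changes to nb_rxq/nb_txq, descriptor counts and
		 * per-socket mempools made from the command line take
		 * effect here.
		 */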
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start the port; set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
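
/*
 * Counterpart of attach_port() for hot-unplug. The port must have been
 * closed beforehand so that no queues or flow rules still reference it.
 */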
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print the status at the end */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}

static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
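
/*
 * Example of the non-VT mapping above: with num_tcs == ETH_4_TCS (4), the
 * eight user priorities 0..7 map to traffic classes 0,1,2,3,0,1,2,3
 * (i % num_tcs); with ETH_8_TCS each priority gets its own traffic class.
 */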
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
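	/*
	 * rte_eal_init() consumed the EAL arguments; what remains (after the
	 * "--" separator on the command line) are the testpmd application
	 * options, handled by launch_args_parse().
	 */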
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_data = rte_stats_bitrate_create();
	if (bitrate_data == NULL)
		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
	rte_stats_bitrate_reg(bitrate_data);
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}