1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_errno.h> 53 #include <rte_byteorder.h> 54 #include <rte_log.h> 55 #include <rte_debug.h> 56 #include <rte_cycles.h> 57 #include <rte_memory.h> 58 #include <rte_memcpy.h> 59 #include <rte_memzone.h> 60 #include <rte_launch.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_ring.h> 67 #include <rte_mempool.h> 68 #include <rte_malloc.h> 69 #include <rte_mbuf.h> 70 #include <rte_interrupts.h> 71 #include <rte_pci.h> 72 #include <rte_ether.h> 73 #include <rte_ethdev.h> 74 #include <rte_dev.h> 75 #include <rte_string_fns.h> 76 #ifdef RTE_LIBRTE_PMD_XENVIRT 77 #include <rte_eth_xenvirt.h> 78 #endif 79 80 #include "testpmd.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 177 /**< Split policy for packets to TX. */ 178 179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 181 182 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 183 uint8_t dcb_config = 0; 184 185 /* Whether the dcb is in testing status */ 186 uint8_t dcb_test = 0; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 #define RTE_PMD_PARAM_UNSET -1 203 /* 204 * Configurable values of RX and TX ring threshold registers. 205 */ 206 207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 210 211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 214 215 /* 216 * Configurable value of RX free threshold. 217 */ 218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 219 220 /* 221 * Configurable value of RX drop enable. 222 */ 223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 224 225 /* 226 * Configurable value of TX free threshold. 227 */ 228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 229 230 /* 231 * Configurable value of TX RS bit threshold. 232 */ 233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 234 235 /* 236 * Configurable value of TX queue flags. 237 */ 238 int32_t txq_flags = RTE_PMD_PARAM_UNSET; 239 240 /* 241 * Receive Side Scaling (RSS) configuration. 242 */ 243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 244 245 /* 246 * Port topology configuration 247 */ 248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 249 250 /* 251 * Avoids to flush all the RX streams before starts forwarding. 252 */ 253 uint8_t no_flush_rx = 0; /* flush by default */ 254 255 /* 256 * Avoids to check link status when starting/stopping a port. 257 */ 258 uint8_t no_link_check = 0; /* check by default */ 259 260 /* 261 * NIC bypass mode configuration options. 262 */ 263 #ifdef RTE_NIC_BYPASS 264 265 /* The NIC bypass watchdog timeout. */ 266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 267 268 #endif 269 270 /* 271 * Ethernet device configuration. 272 */ 273 struct rte_eth_rxmode rx_mode = { 274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 275 .split_hdr_size = 0, 276 .header_split = 0, /**< Header Split disabled. */ 277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 283 }; 284 285 struct rte_fdir_conf fdir_conf = { 286 .mode = RTE_FDIR_MODE_NONE, 287 .pballoc = RTE_FDIR_PBALLOC_64K, 288 .status = RTE_FDIR_REPORT_STATUS, 289 .mask = { 290 .vlan_tci_mask = 0x0, 291 .ipv4_mask = { 292 .src_ip = 0xFFFFFFFF, 293 .dst_ip = 0xFFFFFFFF, 294 }, 295 .ipv6_mask = { 296 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 297 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 298 }, 299 .src_port_mask = 0xFFFF, 300 .dst_port_mask = 0xFFFF, 301 .mac_addr_byte_mask = 0xFF, 302 .tunnel_type_mask = 1, 303 .tunnel_id_mask = 0xFFFFFFFF, 304 }, 305 .drop_queue = 127, 306 }; 307 308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 309 310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 312 313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 315 316 uint16_t nb_tx_queue_stats_mappings = 0; 317 uint16_t nb_rx_queue_stats_mappings = 0; 318 319 unsigned max_socket = 0; 320 321 /* Forward function declarations */ 322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 323 static void check_all_ports_link_status(uint32_t port_mask); 324 325 /* 326 * Check if all the ports are started. 327 * If yes, return positive value. If not, return zero. 328 */ 329 static int all_ports_started(void); 330 331 /* 332 * Find next enabled port 333 */ 334 portid_t 335 find_next_port(portid_t p, struct rte_port *ports, int size) 336 { 337 if (ports == NULL) 338 rte_exit(-EINVAL, "failed to find a next port id\n"); 339 340 while ((p < size) && (ports[p].enabled == 0)) 341 p++; 342 return p; 343 } 344 345 /* 346 * Setup default configuration. 347 */ 348 static void 349 set_default_fwd_lcores_config(void) 350 { 351 unsigned int i; 352 unsigned int nb_lc; 353 unsigned int sock_num; 354 355 nb_lc = 0; 356 for (i = 0; i < RTE_MAX_LCORE; i++) { 357 sock_num = rte_lcore_to_socket_id(i) + 1; 358 if (sock_num > max_socket) { 359 if (sock_num > RTE_MAX_NUMA_NODES) 360 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES); 361 max_socket = sock_num; 362 } 363 if (!rte_lcore_is_enabled(i)) 364 continue; 365 if (i == rte_get_master_lcore()) 366 continue; 367 fwd_lcores_cpuids[nb_lc++] = i; 368 } 369 nb_lcores = (lcoreid_t) nb_lc; 370 nb_cfg_lcores = nb_lcores; 371 nb_fwd_lcores = 1; 372 } 373 374 static void 375 set_def_peer_eth_addrs(void) 376 { 377 portid_t i; 378 379 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 380 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 381 peer_eth_addrs[i].addr_bytes[5] = i; 382 } 383 } 384 385 static void 386 set_default_fwd_ports_config(void) 387 { 388 portid_t pt_id; 389 390 for (pt_id = 0; pt_id < nb_ports; pt_id++) 391 fwd_ports_ids[pt_id] = pt_id; 392 393 nb_cfg_ports = nb_ports; 394 nb_fwd_ports = nb_ports; 395 } 396 397 void 398 set_def_fwd_config(void) 399 { 400 set_default_fwd_lcores_config(); 401 set_def_peer_eth_addrs(); 402 set_default_fwd_ports_config(); 403 } 404 405 /* 406 * Configuration initialisation done once at init time. 407 */ 408 static void 409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 410 unsigned int socket_id) 411 { 412 char pool_name[RTE_MEMPOOL_NAMESIZE]; 413 struct rte_mempool *rte_mp = NULL; 414 uint32_t mb_size; 415 416 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 417 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 418 419 RTE_LOG(INFO, USER1, 420 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 421 pool_name, nb_mbuf, mbuf_seg_size, socket_id); 422 423 #ifdef RTE_LIBRTE_PMD_XENVIRT 424 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 425 (unsigned) mb_mempool_cache, 426 sizeof(struct rte_pktmbuf_pool_private), 427 rte_pktmbuf_pool_init, NULL, 428 rte_pktmbuf_init, NULL, 429 socket_id, 0); 430 #endif 431 432 /* if the former XEN allocation failed fall back to normal allocation */ 433 if (rte_mp == NULL) { 434 if (mp_anon != 0) { 435 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 436 mb_size, (unsigned) mb_mempool_cache, 437 sizeof(struct rte_pktmbuf_pool_private), 438 socket_id, 0); 439 440 if (rte_mempool_populate_anon(rte_mp) == 0) { 441 rte_mempool_free(rte_mp); 442 rte_mp = NULL; 443 } 444 rte_pktmbuf_pool_init(rte_mp, NULL); 445 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 446 } else { 447 /* wrapper to rte_mempool_create() */ 448 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 449 mb_mempool_cache, 0, mbuf_seg_size, socket_id); 450 } 451 } 452 453 if (rte_mp == NULL) { 454 rte_exit(EXIT_FAILURE, 455 "Creation of mbuf pool for socket %u failed: %s\n", 456 socket_id, rte_strerror(rte_errno)); 457 } else if (verbose_level > 0) { 458 rte_mempool_dump(stdout, rte_mp); 459 } 460 } 461 462 /* 463 * Check given socket id is valid or not with NUMA mode, 464 * if valid, return 0, else return -1 465 */ 466 static int 467 check_socket_id(const unsigned int socket_id) 468 { 469 static int warning_once = 0; 470 471 if (socket_id >= max_socket) { 472 if (!warning_once && numa_support) 473 printf("Warning: NUMA should be configured manually by" 474 " using --port-numa-config and" 475 " --ring-numa-config parameters along with" 476 " --numa.\n"); 477 warning_once = 1; 478 return -1; 479 } 480 return 0; 481 } 482 483 static void 484 init_config(void) 485 { 486 portid_t pid; 487 struct rte_port *port; 488 struct rte_mempool *mbp; 489 unsigned int nb_mbuf_per_pool; 490 lcoreid_t lc_id; 491 uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 492 493 memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 494 /* Configuration of logical cores. */ 495 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 496 sizeof(struct fwd_lcore *) * nb_lcores, 497 RTE_CACHE_LINE_SIZE); 498 if (fwd_lcores == NULL) { 499 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 500 "failed\n", nb_lcores); 501 } 502 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 503 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 504 sizeof(struct fwd_lcore), 505 RTE_CACHE_LINE_SIZE); 506 if (fwd_lcores[lc_id] == NULL) { 507 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 508 "failed\n"); 509 } 510 fwd_lcores[lc_id]->cpuid_idx = lc_id; 511 } 512 513 /* 514 * Create pools of mbuf. 515 * If NUMA support is disabled, create a single pool of mbuf in 516 * socket 0 memory by default. 517 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 518 * 519 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 520 * nb_txd can be configured at run time. 521 */ 522 if (param_total_num_mbufs) 523 nb_mbuf_per_pool = param_total_num_mbufs; 524 else { 525 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 526 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 527 528 if (!numa_support) 529 nb_mbuf_per_pool = 530 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 531 } 532 533 if (!numa_support) { 534 if (socket_num == UMA_NO_CONFIG) 535 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 536 else 537 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 538 socket_num); 539 } 540 541 FOREACH_PORT(pid, ports) { 542 port = &ports[pid]; 543 rte_eth_dev_info_get(pid, &port->dev_info); 544 545 if (numa_support) { 546 if (port_numa[pid] != NUMA_NO_CONFIG) 547 port_per_socket[port_numa[pid]]++; 548 else { 549 uint32_t socket_id = rte_eth_dev_socket_id(pid); 550 551 /* if socket_id is invalid, set to 0 */ 552 if (check_socket_id(socket_id) < 0) 553 socket_id = 0; 554 port_per_socket[socket_id]++; 555 } 556 } 557 558 /* set flag to initialize port/queue */ 559 port->need_reconfig = 1; 560 port->need_reconfig_queues = 1; 561 } 562 563 if (numa_support) { 564 uint8_t i; 565 unsigned int nb_mbuf; 566 567 if (param_total_num_mbufs) 568 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 569 570 for (i = 0; i < max_socket; i++) { 571 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 572 if (nb_mbuf) 573 mbuf_pool_create(mbuf_data_size, 574 nb_mbuf,i); 575 } 576 } 577 init_port_config(); 578 579 /* 580 * Records which Mbuf pool to use by each logical core, if needed. 581 */ 582 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 583 mbp = mbuf_pool_find( 584 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 585 586 if (mbp == NULL) 587 mbp = mbuf_pool_find(0); 588 fwd_lcores[lc_id]->mbp = mbp; 589 } 590 591 /* Configuration of packet forwarding streams. */ 592 if (init_fwd_streams() < 0) 593 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 594 595 fwd_config_setup(); 596 } 597 598 599 void 600 reconfig(portid_t new_port_id, unsigned socket_id) 601 { 602 struct rte_port *port; 603 604 /* Reconfiguration of Ethernet ports. */ 605 port = &ports[new_port_id]; 606 rte_eth_dev_info_get(new_port_id, &port->dev_info); 607 608 /* set flag to initialize port/queue */ 609 port->need_reconfig = 1; 610 port->need_reconfig_queues = 1; 611 port->socket_id = socket_id; 612 613 init_port_config(); 614 } 615 616 617 int 618 init_fwd_streams(void) 619 { 620 portid_t pid; 621 struct rte_port *port; 622 streamid_t sm_id, nb_fwd_streams_new; 623 queueid_t q; 624 625 /* set socket id according to numa or not */ 626 FOREACH_PORT(pid, ports) { 627 port = &ports[pid]; 628 if (nb_rxq > port->dev_info.max_rx_queues) { 629 printf("Fail: nb_rxq(%d) is greater than " 630 "max_rx_queues(%d)\n", nb_rxq, 631 port->dev_info.max_rx_queues); 632 return -1; 633 } 634 if (nb_txq > port->dev_info.max_tx_queues) { 635 printf("Fail: nb_txq(%d) is greater than " 636 "max_tx_queues(%d)\n", nb_txq, 637 port->dev_info.max_tx_queues); 638 return -1; 639 } 640 if (numa_support) { 641 if (port_numa[pid] != NUMA_NO_CONFIG) 642 port->socket_id = port_numa[pid]; 643 else { 644 port->socket_id = rte_eth_dev_socket_id(pid); 645 646 /* if socket_id is invalid, set to 0 */ 647 if (check_socket_id(port->socket_id) < 0) 648 port->socket_id = 0; 649 } 650 } 651 else { 652 if (socket_num == UMA_NO_CONFIG) 653 port->socket_id = 0; 654 else 655 port->socket_id = socket_num; 656 } 657 } 658 659 q = RTE_MAX(nb_rxq, nb_txq); 660 if (q == 0) { 661 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 662 return -1; 663 } 664 nb_fwd_streams_new = (streamid_t)(nb_ports * q); 665 if (nb_fwd_streams_new == nb_fwd_streams) 666 return 0; 667 /* clear the old */ 668 if (fwd_streams != NULL) { 669 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 670 if (fwd_streams[sm_id] == NULL) 671 continue; 672 rte_free(fwd_streams[sm_id]); 673 fwd_streams[sm_id] = NULL; 674 } 675 rte_free(fwd_streams); 676 fwd_streams = NULL; 677 } 678 679 /* init new */ 680 nb_fwd_streams = nb_fwd_streams_new; 681 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 682 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 683 if (fwd_streams == NULL) 684 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 685 "failed\n", nb_fwd_streams); 686 687 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 688 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 689 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 690 if (fwd_streams[sm_id] == NULL) 691 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 692 " failed\n"); 693 } 694 695 return 0; 696 } 697 698 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 699 static void 700 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 701 { 702 unsigned int total_burst; 703 unsigned int nb_burst; 704 unsigned int burst_stats[3]; 705 uint16_t pktnb_stats[3]; 706 uint16_t nb_pkt; 707 int burst_percent[3]; 708 709 /* 710 * First compute the total number of packet bursts and the 711 * two highest numbers of bursts of the same number of packets. 712 */ 713 total_burst = 0; 714 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 715 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 716 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 717 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 718 if (nb_burst == 0) 719 continue; 720 total_burst += nb_burst; 721 if (nb_burst > burst_stats[0]) { 722 burst_stats[1] = burst_stats[0]; 723 pktnb_stats[1] = pktnb_stats[0]; 724 burst_stats[0] = nb_burst; 725 pktnb_stats[0] = nb_pkt; 726 } 727 } 728 if (total_burst == 0) 729 return; 730 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 731 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 732 burst_percent[0], (int) pktnb_stats[0]); 733 if (burst_stats[0] == total_burst) { 734 printf("]\n"); 735 return; 736 } 737 if (burst_stats[0] + burst_stats[1] == total_burst) { 738 printf(" + %d%% of %d pkts]\n", 739 100 - burst_percent[0], pktnb_stats[1]); 740 return; 741 } 742 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 743 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 744 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 745 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 746 return; 747 } 748 printf(" + %d%% of %d pkts + %d%% of others]\n", 749 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 750 } 751 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 752 753 static void 754 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 755 { 756 struct rte_port *port; 757 uint8_t i; 758 759 static const char *fwd_stats_border = "----------------------"; 760 761 port = &ports[port_id]; 762 printf("\n %s Forward statistics for port %-2d %s\n", 763 fwd_stats_border, port_id, fwd_stats_border); 764 765 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 766 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 767 "%-"PRIu64"\n", 768 stats->ipackets, stats->imissed, 769 (uint64_t) (stats->ipackets + stats->imissed)); 770 771 if (cur_fwd_eng == &csum_fwd_engine) 772 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 773 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 774 if ((stats->ierrors + stats->rx_nombuf) > 0) { 775 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 776 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 777 } 778 779 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 780 "%-"PRIu64"\n", 781 stats->opackets, port->tx_dropped, 782 (uint64_t) (stats->opackets + port->tx_dropped)); 783 } 784 else { 785 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 786 "%14"PRIu64"\n", 787 stats->ipackets, stats->imissed, 788 (uint64_t) (stats->ipackets + stats->imissed)); 789 790 if (cur_fwd_eng == &csum_fwd_engine) 791 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 792 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 793 if ((stats->ierrors + stats->rx_nombuf) > 0) { 794 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 795 printf(" RX-nombufs: %14"PRIu64"\n", 796 stats->rx_nombuf); 797 } 798 799 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 800 "%14"PRIu64"\n", 801 stats->opackets, port->tx_dropped, 802 (uint64_t) (stats->opackets + port->tx_dropped)); 803 } 804 805 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 806 if (port->rx_stream) 807 pkt_burst_stats_display("RX", 808 &port->rx_stream->rx_burst_stats); 809 if (port->tx_stream) 810 pkt_burst_stats_display("TX", 811 &port->tx_stream->tx_burst_stats); 812 #endif 813 814 if (port->rx_queue_stats_mapping_enabled) { 815 printf("\n"); 816 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 817 printf(" Stats reg %2d RX-packets:%14"PRIu64 818 " RX-errors:%14"PRIu64 819 " RX-bytes:%14"PRIu64"\n", 820 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 821 } 822 printf("\n"); 823 } 824 if (port->tx_queue_stats_mapping_enabled) { 825 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 826 printf(" Stats reg %2d TX-packets:%14"PRIu64 827 " TX-bytes:%14"PRIu64"\n", 828 i, stats->q_opackets[i], stats->q_obytes[i]); 829 } 830 } 831 832 printf(" %s--------------------------------%s\n", 833 fwd_stats_border, fwd_stats_border); 834 } 835 836 static void 837 fwd_stream_stats_display(streamid_t stream_id) 838 { 839 struct fwd_stream *fs; 840 static const char *fwd_top_stats_border = "-------"; 841 842 fs = fwd_streams[stream_id]; 843 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 844 (fs->fwd_dropped == 0)) 845 return; 846 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 847 "TX Port=%2d/Queue=%2d %s\n", 848 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 849 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 850 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 851 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 852 853 /* if checksum mode */ 854 if (cur_fwd_eng == &csum_fwd_engine) { 855 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 856 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 857 } 858 859 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 860 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 861 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 862 #endif 863 } 864 865 static void 866 flush_fwd_rx_queues(void) 867 { 868 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 869 portid_t rxp; 870 portid_t port_id; 871 queueid_t rxq; 872 uint16_t nb_rx; 873 uint16_t i; 874 uint8_t j; 875 876 for (j = 0; j < 2; j++) { 877 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 878 for (rxq = 0; rxq < nb_rxq; rxq++) { 879 port_id = fwd_ports_ids[rxp]; 880 do { 881 nb_rx = rte_eth_rx_burst(port_id, rxq, 882 pkts_burst, MAX_PKT_BURST); 883 for (i = 0; i < nb_rx; i++) 884 rte_pktmbuf_free(pkts_burst[i]); 885 } while (nb_rx > 0); 886 } 887 } 888 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 889 } 890 } 891 892 static void 893 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 894 { 895 struct fwd_stream **fsm; 896 streamid_t nb_fs; 897 streamid_t sm_id; 898 899 fsm = &fwd_streams[fc->stream_idx]; 900 nb_fs = fc->stream_nb; 901 do { 902 for (sm_id = 0; sm_id < nb_fs; sm_id++) 903 (*pkt_fwd)(fsm[sm_id]); 904 } while (! fc->stopped); 905 } 906 907 static int 908 start_pkt_forward_on_core(void *fwd_arg) 909 { 910 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 911 cur_fwd_config.fwd_eng->packet_fwd); 912 return 0; 913 } 914 915 /* 916 * Run the TXONLY packet forwarding engine to send a single burst of packets. 917 * Used to start communication flows in network loopback test configurations. 918 */ 919 static int 920 run_one_txonly_burst_on_core(void *fwd_arg) 921 { 922 struct fwd_lcore *fwd_lc; 923 struct fwd_lcore tmp_lcore; 924 925 fwd_lc = (struct fwd_lcore *) fwd_arg; 926 tmp_lcore = *fwd_lc; 927 tmp_lcore.stopped = 1; 928 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 929 return 0; 930 } 931 932 /* 933 * Launch packet forwarding: 934 * - Setup per-port forwarding context. 935 * - launch logical cores with their forwarding configuration. 936 */ 937 static void 938 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 939 { 940 port_fwd_begin_t port_fwd_begin; 941 unsigned int i; 942 unsigned int lc_id; 943 int diag; 944 945 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 946 if (port_fwd_begin != NULL) { 947 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 948 (*port_fwd_begin)(fwd_ports_ids[i]); 949 } 950 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 951 lc_id = fwd_lcores_cpuids[i]; 952 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 953 fwd_lcores[i]->stopped = 0; 954 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 955 fwd_lcores[i], lc_id); 956 if (diag != 0) 957 printf("launch lcore %u failed - diag=%d\n", 958 lc_id, diag); 959 } 960 } 961 } 962 963 /* 964 * Launch packet forwarding configuration. 965 */ 966 void 967 start_packet_forwarding(int with_tx_first) 968 { 969 port_fwd_begin_t port_fwd_begin; 970 port_fwd_end_t port_fwd_end; 971 struct rte_port *port; 972 unsigned int i; 973 portid_t pt_id; 974 streamid_t sm_id; 975 976 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 977 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 978 979 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 980 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 981 982 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 983 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 984 (!nb_rxq || !nb_txq)) 985 rte_exit(EXIT_FAILURE, 986 "Either rxq or txq are 0, cannot use %s fwd mode\n", 987 cur_fwd_eng->fwd_mode_name); 988 989 if (all_ports_started() == 0) { 990 printf("Not all ports were started\n"); 991 return; 992 } 993 if (test_done == 0) { 994 printf("Packet forwarding already started\n"); 995 return; 996 } 997 998 if (init_fwd_streams() < 0) { 999 printf("Fail from init_fwd_streams()\n"); 1000 return; 1001 } 1002 1003 if(dcb_test) { 1004 for (i = 0; i < nb_fwd_ports; i++) { 1005 pt_id = fwd_ports_ids[i]; 1006 port = &ports[pt_id]; 1007 if (!port->dcb_flag) { 1008 printf("In DCB mode, all forwarding ports must " 1009 "be configured in this mode.\n"); 1010 return; 1011 } 1012 } 1013 if (nb_fwd_lcores == 1) { 1014 printf("In DCB mode,the nb forwarding cores " 1015 "should be larger than 1.\n"); 1016 return; 1017 } 1018 } 1019 test_done = 0; 1020 1021 if(!no_flush_rx) 1022 flush_fwd_rx_queues(); 1023 1024 fwd_config_setup(); 1025 rxtx_config_display(); 1026 1027 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1028 pt_id = fwd_ports_ids[i]; 1029 port = &ports[pt_id]; 1030 rte_eth_stats_get(pt_id, &port->stats); 1031 port->tx_dropped = 0; 1032 1033 map_port_queue_stats_mapping_registers(pt_id, port); 1034 } 1035 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1036 fwd_streams[sm_id]->rx_packets = 0; 1037 fwd_streams[sm_id]->tx_packets = 0; 1038 fwd_streams[sm_id]->fwd_dropped = 0; 1039 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1040 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1041 1042 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1043 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1044 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1045 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1046 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1047 #endif 1048 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1049 fwd_streams[sm_id]->core_cycles = 0; 1050 #endif 1051 } 1052 if (with_tx_first) { 1053 port_fwd_begin = tx_only_engine.port_fwd_begin; 1054 if (port_fwd_begin != NULL) { 1055 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1056 (*port_fwd_begin)(fwd_ports_ids[i]); 1057 } 1058 launch_packet_forwarding(run_one_txonly_burst_on_core); 1059 rte_eal_mp_wait_lcore(); 1060 port_fwd_end = tx_only_engine.port_fwd_end; 1061 if (port_fwd_end != NULL) { 1062 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1063 (*port_fwd_end)(fwd_ports_ids[i]); 1064 } 1065 } 1066 launch_packet_forwarding(start_pkt_forward_on_core); 1067 } 1068 1069 void 1070 stop_packet_forwarding(void) 1071 { 1072 struct rte_eth_stats stats; 1073 struct rte_port *port; 1074 port_fwd_end_t port_fwd_end; 1075 int i; 1076 portid_t pt_id; 1077 streamid_t sm_id; 1078 lcoreid_t lc_id; 1079 uint64_t total_recv; 1080 uint64_t total_xmit; 1081 uint64_t total_rx_dropped; 1082 uint64_t total_tx_dropped; 1083 uint64_t total_rx_nombuf; 1084 uint64_t tx_dropped; 1085 uint64_t rx_bad_ip_csum; 1086 uint64_t rx_bad_l4_csum; 1087 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1088 uint64_t fwd_cycles; 1089 #endif 1090 static const char *acc_stats_border = "+++++++++++++++"; 1091 1092 if (all_ports_started() == 0) { 1093 printf("Not all ports were started\n"); 1094 return; 1095 } 1096 if (test_done) { 1097 printf("Packet forwarding not started\n"); 1098 return; 1099 } 1100 printf("Telling cores to stop..."); 1101 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1102 fwd_lcores[lc_id]->stopped = 1; 1103 printf("\nWaiting for lcores to finish...\n"); 1104 rte_eal_mp_wait_lcore(); 1105 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1106 if (port_fwd_end != NULL) { 1107 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1108 pt_id = fwd_ports_ids[i]; 1109 (*port_fwd_end)(pt_id); 1110 } 1111 } 1112 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1113 fwd_cycles = 0; 1114 #endif 1115 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1116 if (cur_fwd_config.nb_fwd_streams > 1117 cur_fwd_config.nb_fwd_ports) { 1118 fwd_stream_stats_display(sm_id); 1119 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1120 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1121 } else { 1122 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1123 fwd_streams[sm_id]; 1124 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1125 fwd_streams[sm_id]; 1126 } 1127 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1128 tx_dropped = (uint64_t) (tx_dropped + 1129 fwd_streams[sm_id]->fwd_dropped); 1130 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1131 1132 rx_bad_ip_csum = 1133 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1134 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1135 fwd_streams[sm_id]->rx_bad_ip_csum); 1136 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1137 rx_bad_ip_csum; 1138 1139 rx_bad_l4_csum = 1140 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1141 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1142 fwd_streams[sm_id]->rx_bad_l4_csum); 1143 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1144 rx_bad_l4_csum; 1145 1146 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1147 fwd_cycles = (uint64_t) (fwd_cycles + 1148 fwd_streams[sm_id]->core_cycles); 1149 #endif 1150 } 1151 total_recv = 0; 1152 total_xmit = 0; 1153 total_rx_dropped = 0; 1154 total_tx_dropped = 0; 1155 total_rx_nombuf = 0; 1156 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1157 pt_id = fwd_ports_ids[i]; 1158 1159 port = &ports[pt_id]; 1160 rte_eth_stats_get(pt_id, &stats); 1161 stats.ipackets -= port->stats.ipackets; 1162 port->stats.ipackets = 0; 1163 stats.opackets -= port->stats.opackets; 1164 port->stats.opackets = 0; 1165 stats.ibytes -= port->stats.ibytes; 1166 port->stats.ibytes = 0; 1167 stats.obytes -= port->stats.obytes; 1168 port->stats.obytes = 0; 1169 stats.imissed -= port->stats.imissed; 1170 port->stats.imissed = 0; 1171 stats.oerrors -= port->stats.oerrors; 1172 port->stats.oerrors = 0; 1173 stats.rx_nombuf -= port->stats.rx_nombuf; 1174 port->stats.rx_nombuf = 0; 1175 1176 total_recv += stats.ipackets; 1177 total_xmit += stats.opackets; 1178 total_rx_dropped += stats.imissed; 1179 total_tx_dropped += port->tx_dropped; 1180 total_rx_nombuf += stats.rx_nombuf; 1181 1182 fwd_port_stats_display(pt_id, &stats); 1183 } 1184 printf("\n %s Accumulated forward statistics for all ports" 1185 "%s\n", 1186 acc_stats_border, acc_stats_border); 1187 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1188 "%-"PRIu64"\n" 1189 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1190 "%-"PRIu64"\n", 1191 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1192 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1193 if (total_rx_nombuf > 0) 1194 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1195 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1196 "%s\n", 1197 acc_stats_border, acc_stats_border); 1198 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1199 if (total_recv > 0) 1200 printf("\n CPU cycles/packet=%u (total cycles=" 1201 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1202 (unsigned int)(fwd_cycles / total_recv), 1203 fwd_cycles, total_recv); 1204 #endif 1205 printf("\nDone.\n"); 1206 test_done = 1; 1207 } 1208 1209 void 1210 dev_set_link_up(portid_t pid) 1211 { 1212 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1213 printf("\nSet link up fail.\n"); 1214 } 1215 1216 void 1217 dev_set_link_down(portid_t pid) 1218 { 1219 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1220 printf("\nSet link down fail.\n"); 1221 } 1222 1223 static int 1224 all_ports_started(void) 1225 { 1226 portid_t pi; 1227 struct rte_port *port; 1228 1229 FOREACH_PORT(pi, ports) { 1230 port = &ports[pi]; 1231 /* Check if there is a port which is not started */ 1232 if ((port->port_status != RTE_PORT_STARTED) && 1233 (port->slave_flag == 0)) 1234 return 0; 1235 } 1236 1237 /* No port is not started */ 1238 return 1; 1239 } 1240 1241 int 1242 all_ports_stopped(void) 1243 { 1244 portid_t pi; 1245 struct rte_port *port; 1246 1247 FOREACH_PORT(pi, ports) { 1248 port = &ports[pi]; 1249 if ((port->port_status != RTE_PORT_STOPPED) && 1250 (port->slave_flag == 0)) 1251 return 0; 1252 } 1253 1254 return 1; 1255 } 1256 1257 int 1258 port_is_started(portid_t port_id) 1259 { 1260 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1261 return 0; 1262 1263 if (ports[port_id].port_status != RTE_PORT_STARTED) 1264 return 0; 1265 1266 return 1; 1267 } 1268 1269 static int 1270 port_is_closed(portid_t port_id) 1271 { 1272 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1273 return 0; 1274 1275 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1276 return 0; 1277 1278 return 1; 1279 } 1280 1281 int 1282 start_port(portid_t pid) 1283 { 1284 int diag, need_check_link_status = -1; 1285 portid_t pi; 1286 queueid_t qi; 1287 struct rte_port *port; 1288 struct ether_addr mac_addr; 1289 1290 if (port_id_is_invalid(pid, ENABLED_WARN)) 1291 return 0; 1292 1293 if(dcb_config) 1294 dcb_test = 1; 1295 FOREACH_PORT(pi, ports) { 1296 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1297 continue; 1298 1299 need_check_link_status = 0; 1300 port = &ports[pi]; 1301 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1302 RTE_PORT_HANDLING) == 0) { 1303 printf("Port %d is now not stopped\n", pi); 1304 continue; 1305 } 1306 1307 if (port->need_reconfig > 0) { 1308 port->need_reconfig = 0; 1309 1310 printf("Configuring Port %d (socket %u)\n", pi, 1311 port->socket_id); 1312 /* configure port */ 1313 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1314 &(port->dev_conf)); 1315 if (diag != 0) { 1316 if (rte_atomic16_cmpset(&(port->port_status), 1317 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1318 printf("Port %d can not be set back " 1319 "to stopped\n", pi); 1320 printf("Fail to configure port %d\n", pi); 1321 /* try to reconfigure port next time */ 1322 port->need_reconfig = 1; 1323 return -1; 1324 } 1325 } 1326 if (port->need_reconfig_queues > 0) { 1327 port->need_reconfig_queues = 0; 1328 /* setup tx queues */ 1329 for (qi = 0; qi < nb_txq; qi++) { 1330 if ((numa_support) && 1331 (txring_numa[pi] != NUMA_NO_CONFIG)) 1332 diag = rte_eth_tx_queue_setup(pi, qi, 1333 nb_txd,txring_numa[pi], 1334 &(port->tx_conf)); 1335 else 1336 diag = rte_eth_tx_queue_setup(pi, qi, 1337 nb_txd,port->socket_id, 1338 &(port->tx_conf)); 1339 1340 if (diag == 0) 1341 continue; 1342 1343 /* Fail to setup tx queue, return */ 1344 if (rte_atomic16_cmpset(&(port->port_status), 1345 RTE_PORT_HANDLING, 1346 RTE_PORT_STOPPED) == 0) 1347 printf("Port %d can not be set back " 1348 "to stopped\n", pi); 1349 printf("Fail to configure port %d tx queues\n", pi); 1350 /* try to reconfigure queues next time */ 1351 port->need_reconfig_queues = 1; 1352 return -1; 1353 } 1354 /* setup rx queues */ 1355 for (qi = 0; qi < nb_rxq; qi++) { 1356 if ((numa_support) && 1357 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1358 struct rte_mempool * mp = 1359 mbuf_pool_find(rxring_numa[pi]); 1360 if (mp == NULL) { 1361 printf("Failed to setup RX queue:" 1362 "No mempool allocation" 1363 "on the socket %d\n", 1364 rxring_numa[pi]); 1365 return -1; 1366 } 1367 1368 diag = rte_eth_rx_queue_setup(pi, qi, 1369 nb_rxd,rxring_numa[pi], 1370 &(port->rx_conf),mp); 1371 } 1372 else 1373 diag = rte_eth_rx_queue_setup(pi, qi, 1374 nb_rxd,port->socket_id, 1375 &(port->rx_conf), 1376 mbuf_pool_find(port->socket_id)); 1377 1378 if (diag == 0) 1379 continue; 1380 1381 1382 /* Fail to setup rx queue, return */ 1383 if (rte_atomic16_cmpset(&(port->port_status), 1384 RTE_PORT_HANDLING, 1385 RTE_PORT_STOPPED) == 0) 1386 printf("Port %d can not be set back " 1387 "to stopped\n", pi); 1388 printf("Fail to configure port %d rx queues\n", pi); 1389 /* try to reconfigure queues next time */ 1390 port->need_reconfig_queues = 1; 1391 return -1; 1392 } 1393 } 1394 /* start port */ 1395 if (rte_eth_dev_start(pi) < 0) { 1396 printf("Fail to start port %d\n", pi); 1397 1398 /* Fail to setup rx queue, return */ 1399 if (rte_atomic16_cmpset(&(port->port_status), 1400 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1401 printf("Port %d can not be set back to " 1402 "stopped\n", pi); 1403 continue; 1404 } 1405 1406 if (rte_atomic16_cmpset(&(port->port_status), 1407 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1408 printf("Port %d can not be set into started\n", pi); 1409 1410 rte_eth_macaddr_get(pi, &mac_addr); 1411 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1412 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1413 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1414 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1415 1416 /* at least one port started, need checking link status */ 1417 need_check_link_status = 1; 1418 } 1419 1420 if (need_check_link_status == 1 && !no_link_check) 1421 check_all_ports_link_status(RTE_PORT_ALL); 1422 else if (need_check_link_status == 0) 1423 printf("Please stop the ports first\n"); 1424 1425 printf("Done\n"); 1426 return 0; 1427 } 1428 1429 void 1430 stop_port(portid_t pid) 1431 { 1432 portid_t pi; 1433 struct rte_port *port; 1434 int need_check_link_status = 0; 1435 1436 if (dcb_test) { 1437 dcb_test = 0; 1438 dcb_config = 0; 1439 } 1440 1441 if (port_id_is_invalid(pid, ENABLED_WARN)) 1442 return; 1443 1444 printf("Stopping ports...\n"); 1445 1446 FOREACH_PORT(pi, ports) { 1447 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1448 continue; 1449 1450 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1451 printf("Please remove port %d from forwarding configuration.\n", pi); 1452 continue; 1453 } 1454 1455 if (port_is_bonding_slave(pi)) { 1456 printf("Please remove port %d from bonded device.\n", pi); 1457 continue; 1458 } 1459 1460 port = &ports[pi]; 1461 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1462 RTE_PORT_HANDLING) == 0) 1463 continue; 1464 1465 rte_eth_dev_stop(pi); 1466 1467 if (rte_atomic16_cmpset(&(port->port_status), 1468 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1469 printf("Port %d can not be set into stopped\n", pi); 1470 need_check_link_status = 1; 1471 } 1472 if (need_check_link_status && !no_link_check) 1473 check_all_ports_link_status(RTE_PORT_ALL); 1474 1475 printf("Done\n"); 1476 } 1477 1478 void 1479 close_port(portid_t pid) 1480 { 1481 portid_t pi; 1482 struct rte_port *port; 1483 1484 if (port_id_is_invalid(pid, ENABLED_WARN)) 1485 return; 1486 1487 printf("Closing ports...\n"); 1488 1489 FOREACH_PORT(pi, ports) { 1490 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1491 continue; 1492 1493 if (port_is_forwarding(pi) != 0 && test_done == 0) { 1494 printf("Please remove port %d from forwarding configuration.\n", pi); 1495 continue; 1496 } 1497 1498 if (port_is_bonding_slave(pi)) { 1499 printf("Please remove port %d from bonded device.\n", pi); 1500 continue; 1501 } 1502 1503 port = &ports[pi]; 1504 if (rte_atomic16_cmpset(&(port->port_status), 1505 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1506 printf("Port %d is already closed\n", pi); 1507 continue; 1508 } 1509 1510 if (rte_atomic16_cmpset(&(port->port_status), 1511 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1512 printf("Port %d is now not stopped\n", pi); 1513 continue; 1514 } 1515 1516 rte_eth_dev_close(pi); 1517 1518 if (rte_atomic16_cmpset(&(port->port_status), 1519 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1520 printf("Port %d cannot be set to closed\n", pi); 1521 } 1522 1523 printf("Done\n"); 1524 } 1525 1526 void 1527 attach_port(char *identifier) 1528 { 1529 portid_t pi = 0; 1530 unsigned int socket_id; 1531 1532 printf("Attaching a new port...\n"); 1533 1534 if (identifier == NULL) { 1535 printf("Invalid parameters are specified\n"); 1536 return; 1537 } 1538 1539 if (rte_eth_dev_attach(identifier, &pi)) 1540 return; 1541 1542 ports[pi].enabled = 1; 1543 socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1544 /* if socket_id is invalid, set to 0 */ 1545 if (check_socket_id(socket_id) < 0) 1546 socket_id = 0; 1547 reconfig(pi, socket_id); 1548 rte_eth_promiscuous_enable(pi); 1549 1550 nb_ports = rte_eth_dev_count(); 1551 1552 ports[pi].port_status = RTE_PORT_STOPPED; 1553 1554 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1555 printf("Done\n"); 1556 } 1557 1558 void 1559 detach_port(uint8_t port_id) 1560 { 1561 char name[RTE_ETH_NAME_MAX_LEN]; 1562 1563 printf("Detaching a port...\n"); 1564 1565 if (!port_is_closed(port_id)) { 1566 printf("Please close port first\n"); 1567 return; 1568 } 1569 1570 if (rte_eth_dev_detach(port_id, name)) 1571 return; 1572 1573 ports[port_id].enabled = 0; 1574 nb_ports = rte_eth_dev_count(); 1575 1576 printf("Port '%s' is detached. Now total ports is %d\n", 1577 name, nb_ports); 1578 printf("Done\n"); 1579 return; 1580 } 1581 1582 void 1583 pmd_test_exit(void) 1584 { 1585 portid_t pt_id; 1586 1587 if (test_done == 0) 1588 stop_packet_forwarding(); 1589 1590 if (ports != NULL) { 1591 no_link_check = 1; 1592 FOREACH_PORT(pt_id, ports) { 1593 printf("\nShutting down port %d...\n", pt_id); 1594 fflush(stdout); 1595 stop_port(pt_id); 1596 close_port(pt_id); 1597 } 1598 } 1599 printf("\nBye...\n"); 1600 } 1601 1602 typedef void (*cmd_func_t)(void); 1603 struct pmd_test_command { 1604 const char *cmd_name; 1605 cmd_func_t cmd_func; 1606 }; 1607 1608 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1609 1610 /* Check the link status of all ports in up to 9s, and print them finally */ 1611 static void 1612 check_all_ports_link_status(uint32_t port_mask) 1613 { 1614 #define CHECK_INTERVAL 100 /* 100ms */ 1615 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1616 uint8_t portid, count, all_ports_up, print_flag = 0; 1617 struct rte_eth_link link; 1618 1619 printf("Checking link statuses...\n"); 1620 fflush(stdout); 1621 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1622 all_ports_up = 1; 1623 FOREACH_PORT(portid, ports) { 1624 if ((port_mask & (1 << portid)) == 0) 1625 continue; 1626 memset(&link, 0, sizeof(link)); 1627 rte_eth_link_get_nowait(portid, &link); 1628 /* print link status if flag set */ 1629 if (print_flag == 1) { 1630 if (link.link_status) 1631 printf("Port %d Link Up - speed %u " 1632 "Mbps - %s\n", (uint8_t)portid, 1633 (unsigned)link.link_speed, 1634 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1635 ("full-duplex") : ("half-duplex\n")); 1636 else 1637 printf("Port %d Link Down\n", 1638 (uint8_t)portid); 1639 continue; 1640 } 1641 /* clear all_ports_up flag if any link down */ 1642 if (link.link_status == ETH_LINK_DOWN) { 1643 all_ports_up = 0; 1644 break; 1645 } 1646 } 1647 /* after finally printing all link status, get out */ 1648 if (print_flag == 1) 1649 break; 1650 1651 if (all_ports_up == 0) { 1652 fflush(stdout); 1653 rte_delay_ms(CHECK_INTERVAL); 1654 } 1655 1656 /* set the print_flag if all ports up or timeout */ 1657 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1658 print_flag = 1; 1659 } 1660 } 1661 } 1662 1663 static int 1664 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1665 { 1666 uint16_t i; 1667 int diag; 1668 uint8_t mapping_found = 0; 1669 1670 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1671 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1672 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1673 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1674 tx_queue_stats_mappings[i].queue_id, 1675 tx_queue_stats_mappings[i].stats_counter_id); 1676 if (diag != 0) 1677 return diag; 1678 mapping_found = 1; 1679 } 1680 } 1681 if (mapping_found) 1682 port->tx_queue_stats_mapping_enabled = 1; 1683 return 0; 1684 } 1685 1686 static int 1687 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1688 { 1689 uint16_t i; 1690 int diag; 1691 uint8_t mapping_found = 0; 1692 1693 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1694 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1695 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1696 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1697 rx_queue_stats_mappings[i].queue_id, 1698 rx_queue_stats_mappings[i].stats_counter_id); 1699 if (diag != 0) 1700 return diag; 1701 mapping_found = 1; 1702 } 1703 } 1704 if (mapping_found) 1705 port->rx_queue_stats_mapping_enabled = 1; 1706 return 0; 1707 } 1708 1709 static void 1710 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1711 { 1712 int diag = 0; 1713 1714 diag = set_tx_queue_stats_mapping_registers(pi, port); 1715 if (diag != 0) { 1716 if (diag == -ENOTSUP) { 1717 port->tx_queue_stats_mapping_enabled = 0; 1718 printf("TX queue stats mapping not supported port id=%d\n", pi); 1719 } 1720 else 1721 rte_exit(EXIT_FAILURE, 1722 "set_tx_queue_stats_mapping_registers " 1723 "failed for port id=%d diag=%d\n", 1724 pi, diag); 1725 } 1726 1727 diag = set_rx_queue_stats_mapping_registers(pi, port); 1728 if (diag != 0) { 1729 if (diag == -ENOTSUP) { 1730 port->rx_queue_stats_mapping_enabled = 0; 1731 printf("RX queue stats mapping not supported port id=%d\n", pi); 1732 } 1733 else 1734 rte_exit(EXIT_FAILURE, 1735 "set_rx_queue_stats_mapping_registers " 1736 "failed for port id=%d diag=%d\n", 1737 pi, diag); 1738 } 1739 } 1740 1741 static void 1742 rxtx_port_config(struct rte_port *port) 1743 { 1744 port->rx_conf = port->dev_info.default_rxconf; 1745 port->tx_conf = port->dev_info.default_txconf; 1746 1747 /* Check if any RX/TX parameters have been passed */ 1748 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1749 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1750 1751 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1752 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1753 1754 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1755 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1756 1757 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1758 port->rx_conf.rx_free_thresh = rx_free_thresh; 1759 1760 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1761 port->rx_conf.rx_drop_en = rx_drop_en; 1762 1763 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1764 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1765 1766 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 1767 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1768 1769 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1770 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1771 1772 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1773 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1774 1775 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1776 port->tx_conf.tx_free_thresh = tx_free_thresh; 1777 1778 if (txq_flags != RTE_PMD_PARAM_UNSET) 1779 port->tx_conf.txq_flags = txq_flags; 1780 } 1781 1782 void 1783 init_port_config(void) 1784 { 1785 portid_t pid; 1786 struct rte_port *port; 1787 1788 FOREACH_PORT(pid, ports) { 1789 port = &ports[pid]; 1790 port->dev_conf.rxmode = rx_mode; 1791 port->dev_conf.fdir_conf = fdir_conf; 1792 if (nb_rxq > 1) { 1793 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1794 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1795 } else { 1796 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1797 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1798 } 1799 1800 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1801 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1802 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1803 else 1804 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1805 } 1806 1807 if (port->dev_info.max_vfs != 0) { 1808 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1809 port->dev_conf.rxmode.mq_mode = 1810 ETH_MQ_RX_VMDQ_RSS; 1811 else 1812 port->dev_conf.rxmode.mq_mode = 1813 ETH_MQ_RX_NONE; 1814 1815 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE; 1816 } 1817 1818 rxtx_port_config(port); 1819 1820 rte_eth_macaddr_get(pid, &port->eth_addr); 1821 1822 map_port_queue_stats_mapping_registers(pid, port); 1823 #ifdef RTE_NIC_BYPASS 1824 rte_eth_dev_bypass_init(pid); 1825 #endif 1826 } 1827 } 1828 1829 void set_port_slave_flag(portid_t slave_pid) 1830 { 1831 struct rte_port *port; 1832 1833 port = &ports[slave_pid]; 1834 port->slave_flag = 1; 1835 } 1836 1837 void clear_port_slave_flag(portid_t slave_pid) 1838 { 1839 struct rte_port *port; 1840 1841 port = &ports[slave_pid]; 1842 port->slave_flag = 0; 1843 } 1844 1845 uint8_t port_is_bonding_slave(portid_t slave_pid) 1846 { 1847 struct rte_port *port; 1848 1849 port = &ports[slave_pid]; 1850 return port->slave_flag; 1851 } 1852 1853 const uint16_t vlan_tags[] = { 1854 0, 1, 2, 3, 4, 5, 6, 7, 1855 8, 9, 10, 11, 12, 13, 14, 15, 1856 16, 17, 18, 19, 20, 21, 22, 23, 1857 24, 25, 26, 27, 28, 29, 30, 31 1858 }; 1859 1860 static int 1861 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 1862 enum dcb_mode_enable dcb_mode, 1863 enum rte_eth_nb_tcs num_tcs, 1864 uint8_t pfc_en) 1865 { 1866 uint8_t i; 1867 1868 /* 1869 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1870 * given above, and the number of traffic classes available for use. 1871 */ 1872 if (dcb_mode == DCB_VT_ENABLED) { 1873 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 1874 ð_conf->rx_adv_conf.vmdq_dcb_conf; 1875 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 1876 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1877 1878 /* VMDQ+DCB RX and TX configrations */ 1879 vmdq_rx_conf->enable_default_pool = 0; 1880 vmdq_rx_conf->default_pool = 0; 1881 vmdq_rx_conf->nb_queue_pools = 1882 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1883 vmdq_tx_conf->nb_queue_pools = 1884 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1885 1886 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 1887 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 1888 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 1889 vmdq_rx_conf->pool_map[i].pools = 1890 1 << (i % vmdq_rx_conf->nb_queue_pools); 1891 } 1892 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1893 vmdq_rx_conf->dcb_tc[i] = i; 1894 vmdq_tx_conf->dcb_tc[i] = i; 1895 } 1896 1897 /* set DCB mode of RX and TX of multiple queues */ 1898 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1899 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1900 } else { 1901 struct rte_eth_dcb_rx_conf *rx_conf = 1902 ð_conf->rx_adv_conf.dcb_rx_conf; 1903 struct rte_eth_dcb_tx_conf *tx_conf = 1904 ð_conf->tx_adv_conf.dcb_tx_conf; 1905 1906 rx_conf->nb_tcs = num_tcs; 1907 tx_conf->nb_tcs = num_tcs; 1908 1909 for (i = 0; i < num_tcs; i++) { 1910 rx_conf->dcb_tc[i] = i; 1911 tx_conf->dcb_tc[i] = i; 1912 } 1913 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 1914 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 1915 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1916 } 1917 1918 if (pfc_en) 1919 eth_conf->dcb_capability_en = 1920 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 1921 else 1922 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1923 1924 return 0; 1925 } 1926 1927 int 1928 init_port_dcb_config(portid_t pid, 1929 enum dcb_mode_enable dcb_mode, 1930 enum rte_eth_nb_tcs num_tcs, 1931 uint8_t pfc_en) 1932 { 1933 struct rte_eth_conf port_conf; 1934 struct rte_eth_dev_info dev_info; 1935 struct rte_port *rte_port; 1936 int retval; 1937 uint16_t i; 1938 1939 rte_eth_dev_info_get(pid, &dev_info); 1940 1941 /* If dev_info.vmdq_pool_base is greater than 0, 1942 * the queue id of vmdq pools is started after pf queues. 1943 */ 1944 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) { 1945 printf("VMDQ_DCB multi-queue mode is nonsensical" 1946 " for port %d.", pid); 1947 return -1; 1948 } 1949 1950 /* Assume the ports in testpmd have the same dcb capability 1951 * and has the same number of rxq and txq in dcb mode 1952 */ 1953 if (dcb_mode == DCB_VT_ENABLED) { 1954 nb_rxq = dev_info.max_rx_queues; 1955 nb_txq = dev_info.max_tx_queues; 1956 } else { 1957 /*if vt is disabled, use all pf queues */ 1958 if (dev_info.vmdq_pool_base == 0) { 1959 nb_rxq = dev_info.max_rx_queues; 1960 nb_txq = dev_info.max_tx_queues; 1961 } else { 1962 nb_rxq = (queueid_t)num_tcs; 1963 nb_txq = (queueid_t)num_tcs; 1964 1965 } 1966 } 1967 rx_free_thresh = 64; 1968 1969 memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 1970 /* Enter DCB configuration status */ 1971 dcb_config = 1; 1972 1973 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1974 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 1975 if (retval < 0) 1976 return retval; 1977 1978 rte_port = &ports[pid]; 1979 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 1980 1981 rxtx_port_config(rte_port); 1982 /* VLAN filter */ 1983 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1984 for (i = 0; i < RTE_DIM(vlan_tags); i++) 1985 rx_vft_set(pid, vlan_tags[i], 1); 1986 1987 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1988 map_port_queue_stats_mapping_registers(pid, rte_port); 1989 1990 rte_port->dcb_flag = 1; 1991 1992 return 0; 1993 } 1994 1995 static void 1996 init_port(void) 1997 { 1998 portid_t pid; 1999 2000 /* Configuration of Ethernet ports. */ 2001 ports = rte_zmalloc("testpmd: ports", 2002 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2003 RTE_CACHE_LINE_SIZE); 2004 if (ports == NULL) { 2005 rte_exit(EXIT_FAILURE, 2006 "rte_zmalloc(%d struct rte_port) failed\n", 2007 RTE_MAX_ETHPORTS); 2008 } 2009 2010 /* enabled allocated ports */ 2011 for (pid = 0; pid < nb_ports; pid++) 2012 ports[pid].enabled = 1; 2013 } 2014 2015 static void 2016 force_quit(void) 2017 { 2018 pmd_test_exit(); 2019 prompt_exit(); 2020 } 2021 2022 static void 2023 signal_handler(int signum) 2024 { 2025 if (signum == SIGINT || signum == SIGTERM) { 2026 printf("\nSignal %d received, preparing to exit...\n", 2027 signum); 2028 force_quit(); 2029 /* exit with the expected status */ 2030 signal(signum, SIG_DFL); 2031 kill(getpid(), signum); 2032 } 2033 } 2034 2035 int 2036 main(int argc, char** argv) 2037 { 2038 int diag; 2039 uint8_t port_id; 2040 2041 signal(SIGINT, signal_handler); 2042 signal(SIGTERM, signal_handler); 2043 2044 diag = rte_eal_init(argc, argv); 2045 if (diag < 0) 2046 rte_panic("Cannot init EAL\n"); 2047 2048 nb_ports = (portid_t) rte_eth_dev_count(); 2049 if (nb_ports == 0) 2050 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2051 2052 /* allocate port structures, and init them */ 2053 init_port(); 2054 2055 set_def_fwd_config(); 2056 if (nb_lcores == 0) 2057 rte_panic("Empty set of forwarding logical cores - check the " 2058 "core mask supplied in the command parameters\n"); 2059 2060 argc -= diag; 2061 argv += diag; 2062 if (argc > 1) 2063 launch_args_parse(argc, argv); 2064 2065 if (!nb_rxq && !nb_txq) 2066 printf("Warning: Either rx or tx queues should be non-zero\n"); 2067 2068 if (nb_rxq > 1 && nb_rxq > nb_txq) 2069 printf("Warning: nb_rxq=%d enables RSS configuration, " 2070 "but nb_txq=%d will prevent to fully test it.\n", 2071 nb_rxq, nb_txq); 2072 2073 init_config(); 2074 if (start_port(RTE_PORT_ALL) != 0) 2075 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2076 2077 /* set all ports to promiscuous mode by default */ 2078 FOREACH_PORT(port_id, ports) 2079 rte_eth_promiscuous_enable(port_id); 2080 2081 #ifdef RTE_LIBRTE_CMDLINE 2082 if (interactive == 1) { 2083 if (auto_start) { 2084 printf("Start automatic packet forwarding\n"); 2085 start_packet_forwarding(0); 2086 } 2087 prompt(); 2088 } else 2089 #endif 2090 { 2091 char c; 2092 int rc; 2093 2094 printf("No commandline core given, start packet forwarding\n"); 2095 start_packet_forwarding(0); 2096 printf("Press enter to exit\n"); 2097 rc = read(0, &c, 1); 2098 pmd_test_exit(); 2099 if (rc < 0) 2100 return 1; 2101 } 2102 2103 return 0; 2104 } 2105