/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No NUMA support by default */
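/*
 * Editorial note (not in the original file): the NUMA behaviour above is
 * driven by command-line options referenced elsewhere in this file: --numa
 * enables the per-socket dispatch, --port-numa-config and
 * --ring-numa-config pin individual ports/rings to sockets, and
 * --socket-num selects the single socket used in UMA mode.  The exact
 * option syntax is defined by the argument parser (launch_args_parse()),
 * not here.
 */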
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
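/*
 * Editorial sketch (not part of the original file): fwd_engines[] above is
 * a NULL-terminated table, so selecting a forwarding mode by name reduces
 * to a linear scan.  The real selection logic lives in config.c; this
 * helper only illustrates how the table is meant to be consumed.
 */
static inline struct fwd_engine *
fwd_engine_by_name(const char *name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}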
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Do not flush the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Do not check the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find the next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
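/*
 * Editorial note: set_def_peer_eth_addrs() above leaves bytes 1-4 of each
 * address zeroed (the array has static storage duration), so peer port i
 * defaults to the locally administered address 02:00:00:00:00:<i>.
 */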
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			/*
			 * Only initialize the pool if the anonymous
			 * population succeeded; otherwise release it and
			 * fall through to the error path below.
			 */
			if (rte_mp != NULL) {
				if (rte_mempool_populate_anon(rte_mp) == 0) {
					rte_mempool_free(rte_mp);
					rte_mp = NULL;
				} else {
					rte_pktmbuf_pool_init(rte_mp, NULL);
					rte_mempool_obj_iter(rte_mp,
						rte_pktmbuf_init, NULL);
				}
			}
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether a given socket id is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
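/*
 * Editorial worked example for the default pool sizing used by
 * init_config() below: nb_mbuf_per_pool defaults to
 *
 *     RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
 *         + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST
 *
 * i.e. enough mbufs to fill one RX and one TX ring at their maximum
 * depth, one burst in flight, plus one mempool cache per forwarding
 * lcore (the macro values live in testpmd.h).  Without NUMA support the
 * figure is then multiplied by RTE_MAX_ETHPORTS, because a single pool
 * serves all ports.
 */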
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
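/*
 * Editorial worked example: init_fwd_streams() above allocates
 * nb_ports * max(nb_rxq, nb_txq) streams, so with 2 ports and
 * --rxq=4 --txq=2 it allocates 2 * 4 = 8 forwarding streams, one per
 * (port, queue) pair on the larger side.
 */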
712 */ 713 total_burst = 0; 714 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 715 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 716 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 717 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 718 if (nb_burst == 0) 719 continue; 720 total_burst += nb_burst; 721 if (nb_burst > burst_stats[0]) { 722 burst_stats[1] = burst_stats[0]; 723 pktnb_stats[1] = pktnb_stats[0]; 724 burst_stats[0] = nb_burst; 725 pktnb_stats[0] = nb_pkt; 726 } 727 } 728 if (total_burst == 0) 729 return; 730 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 731 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 732 burst_percent[0], (int) pktnb_stats[0]); 733 if (burst_stats[0] == total_burst) { 734 printf("]\n"); 735 return; 736 } 737 if (burst_stats[0] + burst_stats[1] == total_burst) { 738 printf(" + %d%% of %d pkts]\n", 739 100 - burst_percent[0], pktnb_stats[1]); 740 return; 741 } 742 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 743 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 744 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 745 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 746 return; 747 } 748 printf(" + %d%% of %d pkts + %d%% of others]\n", 749 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 750 } 751 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 752 753 static void 754 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 755 { 756 struct rte_port *port; 757 uint8_t i; 758 759 static const char *fwd_stats_border = "----------------------"; 760 761 port = &ports[port_id]; 762 printf("\n %s Forward statistics for port %-2d %s\n", 763 fwd_stats_border, port_id, fwd_stats_border); 764 765 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 766 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 767 "%-"PRIu64"\n", 768 stats->ipackets, stats->imissed, 769 (uint64_t) (stats->ipackets + stats->imissed)); 770 771 if (cur_fwd_eng == &csum_fwd_engine) 772 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 773 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 774 if ((stats->ierrors + stats->rx_nombuf) > 0) { 775 printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 776 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 777 } 778 779 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 780 "%-"PRIu64"\n", 781 stats->opackets, port->tx_dropped, 782 (uint64_t) (stats->opackets + port->tx_dropped)); 783 } 784 else { 785 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 786 "%14"PRIu64"\n", 787 stats->ipackets, stats->imissed, 788 (uint64_t) (stats->ipackets + stats->imissed)); 789 790 if (cur_fwd_eng == &csum_fwd_engine) 791 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 792 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 793 if ((stats->ierrors + stats->rx_nombuf) > 0) { 794 printf(" RX-error:%"PRIu64"\n", stats->ierrors); 795 printf(" RX-nombufs: %14"PRIu64"\n", 796 stats->rx_nombuf); 797 } 798 799 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 800 "%14"PRIu64"\n", 801 stats->opackets, port->tx_dropped, 802 (uint64_t) (stats->opackets + port->tx_dropped)); 803 } 804 805 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 806 if (port->rx_stream) 807 pkt_burst_stats_display("RX", 808 &port->rx_stream->rx_burst_stats); 809 if (port->tx_stream) 810 pkt_burst_stats_display("TX", 811 &port->tx_stream->tx_burst_stats); 812 #endif 813 814 if 
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/* drain the queue until it reports empty */
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
936 */ 937 static void 938 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 939 { 940 port_fwd_begin_t port_fwd_begin; 941 unsigned int i; 942 unsigned int lc_id; 943 int diag; 944 945 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 946 if (port_fwd_begin != NULL) { 947 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 948 (*port_fwd_begin)(fwd_ports_ids[i]); 949 } 950 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 951 lc_id = fwd_lcores_cpuids[i]; 952 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 953 fwd_lcores[i]->stopped = 0; 954 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 955 fwd_lcores[i], lc_id); 956 if (diag != 0) 957 printf("launch lcore %u failed - diag=%d\n", 958 lc_id, diag); 959 } 960 } 961 } 962 963 /* 964 * Launch packet forwarding configuration. 965 */ 966 void 967 start_packet_forwarding(int with_tx_first) 968 { 969 port_fwd_begin_t port_fwd_begin; 970 port_fwd_end_t port_fwd_end; 971 struct rte_port *port; 972 unsigned int i; 973 portid_t pt_id; 974 streamid_t sm_id; 975 976 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 977 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 978 979 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 980 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 981 982 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 983 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 984 (!nb_rxq || !nb_txq)) 985 rte_exit(EXIT_FAILURE, 986 "Either rxq or txq are 0, cannot use %s fwd mode\n", 987 cur_fwd_eng->fwd_mode_name); 988 989 if (all_ports_started() == 0) { 990 printf("Not all ports were started\n"); 991 return; 992 } 993 if (test_done == 0) { 994 printf("Packet forwarding already started\n"); 995 return; 996 } 997 998 if (init_fwd_streams() < 0) { 999 printf("Fail from init_fwd_streams()\n"); 1000 return; 1001 } 1002 1003 if(dcb_test) { 1004 for (i = 0; i < nb_fwd_ports; i++) { 1005 pt_id = fwd_ports_ids[i]; 1006 port = &ports[pt_id]; 1007 if (!port->dcb_flag) { 1008 printf("In DCB mode, all forwarding ports must " 1009 "be configured in this mode.\n"); 1010 return; 1011 } 1012 } 1013 if (nb_fwd_lcores == 1) { 1014 printf("In DCB mode,the nb forwarding cores " 1015 "should be larger than 1.\n"); 1016 return; 1017 } 1018 } 1019 test_done = 0; 1020 1021 if(!no_flush_rx) 1022 flush_fwd_rx_queues(); 1023 1024 fwd_config_setup(); 1025 rxtx_config_display(); 1026 1027 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1028 pt_id = fwd_ports_ids[i]; 1029 port = &ports[pt_id]; 1030 rte_eth_stats_get(pt_id, &port->stats); 1031 port->tx_dropped = 0; 1032 1033 map_port_queue_stats_mapping_registers(pt_id, port); 1034 } 1035 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1036 fwd_streams[sm_id]->rx_packets = 0; 1037 fwd_streams[sm_id]->tx_packets = 0; 1038 fwd_streams[sm_id]->fwd_dropped = 0; 1039 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1040 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1041 1042 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1043 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1044 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1045 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1046 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1047 #endif 1048 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1049 fwd_streams[sm_id]->core_cycles = 0; 1050 #endif 1051 } 1052 if (with_tx_first) { 1053 port_fwd_begin = tx_only_engine.port_fwd_begin; 1054 if (port_fwd_begin != NULL) { 1055 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1056 
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/*
		 * Report only what happened during this run: subtract the
		 * snapshot taken in start_packet_forwarding().
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
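/*
 * Editorial note: a port moves through the states RTE_PORT_STOPPED ->
 * RTE_PORT_HANDLING -> RTE_PORT_STARTED (and back through
 * RTE_PORT_HANDLING on stop/close).  start_port(), stop_port() and
 * close_port() below claim a port by atomically swapping port_status
 * with rte_atomic16_cmpset(), so a port already being handled by another
 * path is simply skipped.
 */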
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
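/*
 * Editorial usage note: attach_port()/detach_port() back the interactive
 * "port attach <identifier>" and "port detach <port_id>" commands, where
 * the identifier is a PCI address or virtual device name understood by
 * rte_eth_dev_attach() (for example "0000:02:00.0").  A port must be
 * closed before it can be detached.
 */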
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
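/*
 * Editorial usage note: the mapping tables consumed above are filled from
 * the command line; per the testpmd documentation the option syntax is
 * --rx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping)]
 * (and the equivalent --tx-queue-stats-mapping), binding a queue to one
 * of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-port statistics registers.
 */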
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
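/*
 * Editorial worked example for get_eth_dcb_conf() below, in the
 * VT (VMDQ+DCB) case: with num_tcs == ETH_4_TCS there are 32 pools, and
 * pool map i binds VLAN id vlan_tags[i] to pool (i % 32), so the 32 VLAN
 * tags above spread one-to-one over the 32 pools; with 8 TCs there are
 * 16 pools and only the first 16 tags are mapped, again one per pool.
 */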
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}

int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/* if vt is disabled, use all pf queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
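/*
 * Editorial usage note: init_port_dcb_config() backs the interactive
 * "port config <port_id> dcb vt (on|off) <num_tcs> pfc (on|off)" command
 * (the port must be stopped first); the exact command grammar lives in
 * cmdline.c.
 */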
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}