1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_byteorder.h> 53 #include <rte_log.h> 54 #include <rte_debug.h> 55 #include <rte_cycles.h> 56 #include <rte_memory.h> 57 #include <rte_memcpy.h> 58 #include <rte_memzone.h> 59 #include <rte_launch.h> 60 #include <rte_eal.h> 61 #include <rte_per_lcore.h> 62 #include <rte_lcore.h> 63 #include <rte_atomic.h> 64 #include <rte_branch_prediction.h> 65 #include <rte_ring.h> 66 #include <rte_mempool.h> 67 #include <rte_malloc.h> 68 #include <rte_mbuf.h> 69 #include <rte_interrupts.h> 70 #include <rte_pci.h> 71 #include <rte_ether.h> 72 #include <rte_ethdev.h> 73 #include <rte_dev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 79 #include "testpmd.h" 80 #include "mempool_osdep.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 178 179 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180 uint8_t dcb_config = 0; 181 182 /* Whether the dcb is in testing status */ 183 uint8_t dcb_test = 0; 184 185 /* DCB on and VT on mapping is default */ 186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 #define RTE_PMD_PARAM_UNSET -1 203 /* 204 * Configurable values of RX and TX ring threshold registers. 205 */ 206 207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 210 211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 214 215 /* 216 * Configurable value of RX free threshold. 217 */ 218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 219 220 /* 221 * Configurable value of RX drop enable. 222 */ 223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 224 225 /* 226 * Configurable value of TX free threshold. 227 */ 228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 229 230 /* 231 * Configurable value of TX RS bit threshold. 232 */ 233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 234 235 /* 236 * Configurable value of TX queue flags. 237 */ 238 int32_t txq_flags = RTE_PMD_PARAM_UNSET; 239 240 /* 241 * Receive Side Scaling (RSS) configuration. 242 */ 243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 244 245 /* 246 * Port topology configuration 247 */ 248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 249 250 /* 251 * Avoids to flush all the RX streams before starts forwarding. 252 */ 253 uint8_t no_flush_rx = 0; /* flush by default */ 254 255 /* 256 * Avoids to check link status when starting/stopping a port. 257 */ 258 uint8_t no_link_check = 0; /* check by default */ 259 260 /* 261 * NIC bypass mode configuration options. 262 */ 263 #ifdef RTE_NIC_BYPASS 264 265 /* The NIC bypass watchdog timeout. */ 266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 267 268 #endif 269 270 /* 271 * Ethernet device configuration. 272 */ 273 struct rte_eth_rxmode rx_mode = { 274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 275 .split_hdr_size = 0, 276 .header_split = 0, /**< Header Split disabled. */ 277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 283 }; 284 285 struct rte_fdir_conf fdir_conf = { 286 .mode = RTE_FDIR_MODE_NONE, 287 .pballoc = RTE_FDIR_PBALLOC_64K, 288 .status = RTE_FDIR_REPORT_STATUS, 289 .mask = { 290 .vlan_tci_mask = 0x0, 291 .ipv4_mask = { 292 .src_ip = 0xFFFFFFFF, 293 .dst_ip = 0xFFFFFFFF, 294 }, 295 .ipv6_mask = { 296 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 297 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 298 }, 299 .src_port_mask = 0xFFFF, 300 .dst_port_mask = 0xFFFF, 301 }, 302 .drop_queue = 127, 303 }; 304 305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 306 307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 309 310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 312 313 uint16_t nb_tx_queue_stats_mappings = 0; 314 uint16_t nb_rx_queue_stats_mappings = 0; 315 316 /* Forward function declarations */ 317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 318 static void check_all_ports_link_status(uint32_t port_mask); 319 320 /* 321 * Check if all the ports are started. 322 * If yes, return positive value. If not, return zero. 323 */ 324 static int all_ports_started(void); 325 326 /* 327 * Find next enabled port 328 */ 329 portid_t 330 find_next_port(portid_t p, struct rte_port *ports, int size) 331 { 332 if (ports == NULL) 333 rte_exit(-EINVAL, "failed to find a next port id\n"); 334 335 while ((ports[p].enabled == 0) && (p < size)) 336 p++; 337 return p; 338 } 339 340 /* 341 * Setup default configuration. 342 */ 343 static void 344 set_default_fwd_lcores_config(void) 345 { 346 unsigned int i; 347 unsigned int nb_lc; 348 349 nb_lc = 0; 350 for (i = 0; i < RTE_MAX_LCORE; i++) { 351 if (! rte_lcore_is_enabled(i)) 352 continue; 353 if (i == rte_get_master_lcore()) 354 continue; 355 fwd_lcores_cpuids[nb_lc++] = i; 356 } 357 nb_lcores = (lcoreid_t) nb_lc; 358 nb_cfg_lcores = nb_lcores; 359 nb_fwd_lcores = 1; 360 } 361 362 static void 363 set_def_peer_eth_addrs(void) 364 { 365 portid_t i; 366 367 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 368 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 369 peer_eth_addrs[i].addr_bytes[5] = i; 370 } 371 } 372 373 static void 374 set_default_fwd_ports_config(void) 375 { 376 portid_t pt_id; 377 378 for (pt_id = 0; pt_id < nb_ports; pt_id++) 379 fwd_ports_ids[pt_id] = pt_id; 380 381 nb_cfg_ports = nb_ports; 382 nb_fwd_ports = nb_ports; 383 } 384 385 void 386 set_def_fwd_config(void) 387 { 388 set_default_fwd_lcores_config(); 389 set_def_peer_eth_addrs(); 390 set_default_fwd_ports_config(); 391 } 392 393 /* 394 * Configuration initialisation done once at init time. 395 */ 396 struct mbuf_ctor_arg { 397 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 398 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 399 }; 400 401 struct mbuf_pool_ctor_arg { 402 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 403 }; 404 405 static void 406 testpmd_mbuf_ctor(struct rte_mempool *mp, 407 void *opaque_arg, 408 void *raw_mbuf, 409 __attribute__((unused)) unsigned i) 410 { 411 struct mbuf_ctor_arg *mb_ctor_arg; 412 struct rte_mbuf *mb; 413 414 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 415 mb = (struct rte_mbuf *) raw_mbuf; 416 417 mb->pool = mp; 418 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 419 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 420 mb_ctor_arg->seg_buf_offset); 421 mb->buf_len = mb_ctor_arg->seg_buf_size; 422 mb->ol_flags = 0; 423 mb->data_off = RTE_PKTMBUF_HEADROOM; 424 mb->nb_segs = 1; 425 mb->tx_offload = 0; 426 mb->vlan_tci = 0; 427 mb->hash.rss = 0; 428 } 429 430 static void 431 testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 432 void *opaque_arg) 433 { 434 struct mbuf_pool_ctor_arg *mbp_ctor_arg; 435 struct rte_pktmbuf_pool_private *mbp_priv; 436 437 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 438 printf("%s(%s) private_data_size %d < %d\n", 439 __func__, mp->name, (int) mp->private_data_size, 440 (int) sizeof(struct rte_pktmbuf_pool_private)); 441 return; 442 } 443 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 444 mbp_priv = rte_mempool_get_priv(mp); 445 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 446 } 447 448 static void 449 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 450 unsigned int socket_id) 451 { 452 char pool_name[RTE_MEMPOOL_NAMESIZE]; 453 struct rte_mempool *rte_mp; 454 struct mbuf_pool_ctor_arg mbp_ctor_arg; 455 struct mbuf_ctor_arg mb_ctor_arg; 456 uint32_t mb_size; 457 458 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 459 mbuf_seg_size); 460 mb_ctor_arg.seg_buf_offset = 461 (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 462 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 463 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 464 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 465 466 #ifdef RTE_LIBRTE_PMD_XENVIRT 467 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 468 (unsigned) mb_mempool_cache, 469 sizeof(struct rte_pktmbuf_pool_private), 470 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 471 testpmd_mbuf_ctor, &mb_ctor_arg, 472 socket_id, 0); 473 474 475 476 #else 477 if (mp_anon != 0) 478 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 479 (unsigned) mb_mempool_cache, 480 sizeof(struct rte_pktmbuf_pool_private), 481 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 482 testpmd_mbuf_ctor, &mb_ctor_arg, 483 socket_id, 0); 484 else 485 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 486 (unsigned) mb_mempool_cache, 487 sizeof(struct rte_pktmbuf_pool_private), 488 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 489 testpmd_mbuf_ctor, &mb_ctor_arg, 490 socket_id, 0); 491 492 #endif 493 494 if (rte_mp == NULL) { 495 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 496 "failed\n", socket_id); 497 } else if (verbose_level > 0) { 498 rte_mempool_dump(stdout, rte_mp); 499 } 500 } 501 502 /* 503 * Check given socket id is valid or not with NUMA mode, 504 * if valid, return 0, else return -1 505 */ 506 static int 507 check_socket_id(const unsigned int socket_id) 508 { 509 static int warning_once = 0; 510 511 if (socket_id >= MAX_SOCKET) { 512 if (!warning_once && numa_support) 513 printf("Warning: NUMA should be configured manually by" 514 " using --port-numa-config and" 515 " --ring-numa-config parameters along with" 516 " --numa.\n"); 517 warning_once = 1; 518 return -1; 519 } 520 return 0; 521 } 522 523 static void 524 init_config(void) 525 { 526 portid_t pid; 527 struct rte_port *port; 528 struct rte_mempool *mbp; 529 unsigned int nb_mbuf_per_pool; 530 lcoreid_t lc_id; 531 uint8_t port_per_socket[MAX_SOCKET]; 532 533 memset(port_per_socket,0,MAX_SOCKET); 534 /* Configuration of logical cores. */ 535 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 536 sizeof(struct fwd_lcore *) * nb_lcores, 537 RTE_CACHE_LINE_SIZE); 538 if (fwd_lcores == NULL) { 539 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 540 "failed\n", nb_lcores); 541 } 542 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 543 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 544 sizeof(struct fwd_lcore), 545 RTE_CACHE_LINE_SIZE); 546 if (fwd_lcores[lc_id] == NULL) { 547 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 548 "failed\n"); 549 } 550 fwd_lcores[lc_id]->cpuid_idx = lc_id; 551 } 552 553 /* 554 * Create pools of mbuf. 555 * If NUMA support is disabled, create a single pool of mbuf in 556 * socket 0 memory by default. 557 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 558 * 559 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 560 * nb_txd can be configured at run time. 561 */ 562 if (param_total_num_mbufs) 563 nb_mbuf_per_pool = param_total_num_mbufs; 564 else { 565 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 566 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 567 568 if (!numa_support) 569 nb_mbuf_per_pool = 570 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 571 } 572 573 if (!numa_support) { 574 if (socket_num == UMA_NO_CONFIG) 575 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 576 else 577 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 578 socket_num); 579 } 580 581 FOREACH_PORT(pid, ports) { 582 port = &ports[pid]; 583 rte_eth_dev_info_get(pid, &port->dev_info); 584 585 if (numa_support) { 586 if (port_numa[pid] != NUMA_NO_CONFIG) 587 port_per_socket[port_numa[pid]]++; 588 else { 589 uint32_t socket_id = rte_eth_dev_socket_id(pid); 590 591 /* if socket_id is invalid, set to 0 */ 592 if (check_socket_id(socket_id) < 0) 593 socket_id = 0; 594 port_per_socket[socket_id]++; 595 } 596 } 597 598 /* set flag to initialize port/queue */ 599 port->need_reconfig = 1; 600 port->need_reconfig_queues = 1; 601 } 602 603 if (numa_support) { 604 uint8_t i; 605 unsigned int nb_mbuf; 606 607 if (param_total_num_mbufs) 608 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 609 610 for (i = 0; i < MAX_SOCKET; i++) { 611 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 612 if (nb_mbuf) 613 mbuf_pool_create(mbuf_data_size, 614 nb_mbuf,i); 615 } 616 } 617 init_port_config(); 618 619 /* 620 * Records which Mbuf pool to use by each logical core, if needed. 621 */ 622 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 623 mbp = mbuf_pool_find( 624 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 625 626 if (mbp == NULL) 627 mbp = mbuf_pool_find(0); 628 fwd_lcores[lc_id]->mbp = mbp; 629 } 630 631 /* Configuration of packet forwarding streams. */ 632 if (init_fwd_streams() < 0) 633 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 634 } 635 636 637 void 638 reconfig(portid_t new_port_id, unsigned socket_id) 639 { 640 struct rte_port *port; 641 642 /* Reconfiguration of Ethernet ports. */ 643 port = &ports[new_port_id]; 644 rte_eth_dev_info_get(new_port_id, &port->dev_info); 645 646 /* set flag to initialize port/queue */ 647 port->need_reconfig = 1; 648 port->need_reconfig_queues = 1; 649 port->socket_id = socket_id; 650 651 init_port_config(); 652 } 653 654 655 int 656 init_fwd_streams(void) 657 { 658 portid_t pid; 659 struct rte_port *port; 660 streamid_t sm_id, nb_fwd_streams_new; 661 662 /* set socket id according to numa or not */ 663 FOREACH_PORT(pid, ports) { 664 port = &ports[pid]; 665 if (nb_rxq > port->dev_info.max_rx_queues) { 666 printf("Fail: nb_rxq(%d) is greater than " 667 "max_rx_queues(%d)\n", nb_rxq, 668 port->dev_info.max_rx_queues); 669 return -1; 670 } 671 if (nb_txq > port->dev_info.max_tx_queues) { 672 printf("Fail: nb_txq(%d) is greater than " 673 "max_tx_queues(%d)\n", nb_txq, 674 port->dev_info.max_tx_queues); 675 return -1; 676 } 677 if (numa_support) { 678 if (port_numa[pid] != NUMA_NO_CONFIG) 679 port->socket_id = port_numa[pid]; 680 else { 681 port->socket_id = rte_eth_dev_socket_id(pid); 682 683 /* if socket_id is invalid, set to 0 */ 684 if (check_socket_id(port->socket_id) < 0) 685 port->socket_id = 0; 686 } 687 } 688 else { 689 if (socket_num == UMA_NO_CONFIG) 690 port->socket_id = 0; 691 else 692 port->socket_id = socket_num; 693 } 694 } 695 696 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 697 if (nb_fwd_streams_new == nb_fwd_streams) 698 return 0; 699 /* clear the old */ 700 if (fwd_streams != NULL) { 701 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 702 if (fwd_streams[sm_id] == NULL) 703 continue; 704 rte_free(fwd_streams[sm_id]); 705 fwd_streams[sm_id] = NULL; 706 } 707 rte_free(fwd_streams); 708 fwd_streams = NULL; 709 } 710 711 /* init new */ 712 nb_fwd_streams = nb_fwd_streams_new; 713 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 714 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 715 if (fwd_streams == NULL) 716 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 717 "failed\n", nb_fwd_streams); 718 719 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 720 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 721 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 722 if (fwd_streams[sm_id] == NULL) 723 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 724 " failed\n"); 725 } 726 727 return 0; 728 } 729 730 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 731 static void 732 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 733 { 734 unsigned int total_burst; 735 unsigned int nb_burst; 736 unsigned int burst_stats[3]; 737 uint16_t pktnb_stats[3]; 738 uint16_t nb_pkt; 739 int burst_percent[3]; 740 741 /* 742 * First compute the total number of packet bursts and the 743 * two highest numbers of bursts of the same number of packets. 744 */ 745 total_burst = 0; 746 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 747 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 748 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 749 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 750 if (nb_burst == 0) 751 continue; 752 total_burst += nb_burst; 753 if (nb_burst > burst_stats[0]) { 754 burst_stats[1] = burst_stats[0]; 755 pktnb_stats[1] = pktnb_stats[0]; 756 burst_stats[0] = nb_burst; 757 pktnb_stats[0] = nb_pkt; 758 } 759 } 760 if (total_burst == 0) 761 return; 762 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 763 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 764 burst_percent[0], (int) pktnb_stats[0]); 765 if (burst_stats[0] == total_burst) { 766 printf("]\n"); 767 return; 768 } 769 if (burst_stats[0] + burst_stats[1] == total_burst) { 770 printf(" + %d%% of %d pkts]\n", 771 100 - burst_percent[0], pktnb_stats[1]); 772 return; 773 } 774 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 775 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 776 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 777 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 778 return; 779 } 780 printf(" + %d%% of %d pkts + %d%% of others]\n", 781 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 782 } 783 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 784 785 static void 786 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 787 { 788 struct rte_port *port; 789 uint8_t i; 790 791 static const char *fwd_stats_border = "----------------------"; 792 793 port = &ports[port_id]; 794 printf("\n %s Forward statistics for port %-2d %s\n", 795 fwd_stats_border, port_id, fwd_stats_border); 796 797 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 798 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 799 "%-"PRIu64"\n", 800 stats->ipackets, stats->imissed, 801 (uint64_t) (stats->ipackets + stats->imissed)); 802 803 if (cur_fwd_eng == &csum_fwd_engine) 804 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 805 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 806 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 807 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64 808 "RX-error: %-"PRIu64"\n", 809 stats->ibadcrc, stats->ibadlen, stats->ierrors); 810 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 811 } 812 813 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 814 "%-"PRIu64"\n", 815 stats->opackets, port->tx_dropped, 816 (uint64_t) (stats->opackets + port->tx_dropped)); 817 } 818 else { 819 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 820 "%14"PRIu64"\n", 821 stats->ipackets, stats->imissed, 822 (uint64_t) (stats->ipackets + stats->imissed)); 823 824 if (cur_fwd_eng == &csum_fwd_engine) 825 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 826 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 827 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 828 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64 829 " RX-error:%"PRIu64"\n", 830 stats->ibadcrc, stats->ibadlen, stats->ierrors); 831 printf(" RX-nombufs: %14"PRIu64"\n", 832 stats->rx_nombuf); 833 } 834 835 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 836 "%14"PRIu64"\n", 837 stats->opackets, port->tx_dropped, 838 (uint64_t) (stats->opackets + port->tx_dropped)); 839 } 840 841 /* Display statistics of XON/XOFF pause frames, if any. */ 842 if ((stats->tx_pause_xon | stats->rx_pause_xon | 843 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 844 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 845 stats->rx_pause_xoff, stats->rx_pause_xon); 846 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 847 stats->tx_pause_xoff, stats->tx_pause_xon); 848 } 849 850 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 851 if (port->rx_stream) 852 pkt_burst_stats_display("RX", 853 &port->rx_stream->rx_burst_stats); 854 if (port->tx_stream) 855 pkt_burst_stats_display("TX", 856 &port->tx_stream->tx_burst_stats); 857 #endif 858 /* stats fdir */ 859 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 860 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 861 stats->fdirmiss, 862 stats->fdirmatch); 863 864 if (port->rx_queue_stats_mapping_enabled) { 865 printf("\n"); 866 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 867 printf(" Stats reg %2d RX-packets:%14"PRIu64 868 " RX-errors:%14"PRIu64 869 " RX-bytes:%14"PRIu64"\n", 870 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 871 } 872 printf("\n"); 873 } 874 if (port->tx_queue_stats_mapping_enabled) { 875 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 876 printf(" Stats reg %2d TX-packets:%14"PRIu64 877 " TX-bytes:%14"PRIu64"\n", 878 i, stats->q_opackets[i], stats->q_obytes[i]); 879 } 880 } 881 882 printf(" %s--------------------------------%s\n", 883 fwd_stats_border, fwd_stats_border); 884 } 885 886 static void 887 fwd_stream_stats_display(streamid_t stream_id) 888 { 889 struct fwd_stream *fs; 890 static const char *fwd_top_stats_border = "-------"; 891 892 fs = fwd_streams[stream_id]; 893 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 894 (fs->fwd_dropped == 0)) 895 return; 896 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 897 "TX Port=%2d/Queue=%2d %s\n", 898 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 899 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 900 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 901 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 902 903 /* if checksum mode */ 904 if (cur_fwd_eng == &csum_fwd_engine) { 905 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 906 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 907 } 908 909 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 910 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 911 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 912 #endif 913 } 914 915 static void 916 flush_fwd_rx_queues(void) 917 { 918 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 919 portid_t rxp; 920 portid_t port_id; 921 queueid_t rxq; 922 uint16_t nb_rx; 923 uint16_t i; 924 uint8_t j; 925 926 for (j = 0; j < 2; j++) { 927 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 928 for (rxq = 0; rxq < nb_rxq; rxq++) { 929 port_id = fwd_ports_ids[rxp]; 930 do { 931 nb_rx = rte_eth_rx_burst(port_id, rxq, 932 pkts_burst, MAX_PKT_BURST); 933 for (i = 0; i < nb_rx; i++) 934 rte_pktmbuf_free(pkts_burst[i]); 935 } while (nb_rx > 0); 936 } 937 } 938 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 939 } 940 } 941 942 static void 943 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 944 { 945 struct fwd_stream **fsm; 946 streamid_t nb_fs; 947 streamid_t sm_id; 948 949 fsm = &fwd_streams[fc->stream_idx]; 950 nb_fs = fc->stream_nb; 951 do { 952 for (sm_id = 0; sm_id < nb_fs; sm_id++) 953 (*pkt_fwd)(fsm[sm_id]); 954 } while (! fc->stopped); 955 } 956 957 static int 958 start_pkt_forward_on_core(void *fwd_arg) 959 { 960 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 961 cur_fwd_config.fwd_eng->packet_fwd); 962 return 0; 963 } 964 965 /* 966 * Run the TXONLY packet forwarding engine to send a single burst of packets. 967 * Used to start communication flows in network loopback test configurations. 968 */ 969 static int 970 run_one_txonly_burst_on_core(void *fwd_arg) 971 { 972 struct fwd_lcore *fwd_lc; 973 struct fwd_lcore tmp_lcore; 974 975 fwd_lc = (struct fwd_lcore *) fwd_arg; 976 tmp_lcore = *fwd_lc; 977 tmp_lcore.stopped = 1; 978 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 979 return 0; 980 } 981 982 /* 983 * Launch packet forwarding: 984 * - Setup per-port forwarding context. 985 * - launch logical cores with their forwarding configuration. 986 */ 987 static void 988 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 989 { 990 port_fwd_begin_t port_fwd_begin; 991 unsigned int i; 992 unsigned int lc_id; 993 int diag; 994 995 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 996 if (port_fwd_begin != NULL) { 997 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 998 (*port_fwd_begin)(fwd_ports_ids[i]); 999 } 1000 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1001 lc_id = fwd_lcores_cpuids[i]; 1002 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1003 fwd_lcores[i]->stopped = 0; 1004 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1005 fwd_lcores[i], lc_id); 1006 if (diag != 0) 1007 printf("launch lcore %u failed - diag=%d\n", 1008 lc_id, diag); 1009 } 1010 } 1011 } 1012 1013 /* 1014 * Launch packet forwarding configuration. 1015 */ 1016 void 1017 start_packet_forwarding(int with_tx_first) 1018 { 1019 port_fwd_begin_t port_fwd_begin; 1020 port_fwd_end_t port_fwd_end; 1021 struct rte_port *port; 1022 unsigned int i; 1023 portid_t pt_id; 1024 streamid_t sm_id; 1025 1026 if (all_ports_started() == 0) { 1027 printf("Not all ports were started\n"); 1028 return; 1029 } 1030 if (test_done == 0) { 1031 printf("Packet forwarding already started\n"); 1032 return; 1033 } 1034 if(dcb_test) { 1035 for (i = 0; i < nb_fwd_ports; i++) { 1036 pt_id = fwd_ports_ids[i]; 1037 port = &ports[pt_id]; 1038 if (!port->dcb_flag) { 1039 printf("In DCB mode, all forwarding ports must " 1040 "be configured in this mode.\n"); 1041 return; 1042 } 1043 } 1044 if (nb_fwd_lcores == 1) { 1045 printf("In DCB mode,the nb forwarding cores " 1046 "should be larger than 1.\n"); 1047 return; 1048 } 1049 } 1050 test_done = 0; 1051 1052 if(!no_flush_rx) 1053 flush_fwd_rx_queues(); 1054 1055 fwd_config_setup(); 1056 rxtx_config_display(); 1057 1058 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1059 pt_id = fwd_ports_ids[i]; 1060 port = &ports[pt_id]; 1061 rte_eth_stats_get(pt_id, &port->stats); 1062 port->tx_dropped = 0; 1063 1064 map_port_queue_stats_mapping_registers(pt_id, port); 1065 } 1066 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1067 fwd_streams[sm_id]->rx_packets = 0; 1068 fwd_streams[sm_id]->tx_packets = 0; 1069 fwd_streams[sm_id]->fwd_dropped = 0; 1070 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1071 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1072 1073 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1074 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1075 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1076 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1077 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1078 #endif 1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1080 fwd_streams[sm_id]->core_cycles = 0; 1081 #endif 1082 } 1083 if (with_tx_first) { 1084 port_fwd_begin = tx_only_engine.port_fwd_begin; 1085 if (port_fwd_begin != NULL) { 1086 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1087 (*port_fwd_begin)(fwd_ports_ids[i]); 1088 } 1089 launch_packet_forwarding(run_one_txonly_burst_on_core); 1090 rte_eal_mp_wait_lcore(); 1091 port_fwd_end = tx_only_engine.port_fwd_end; 1092 if (port_fwd_end != NULL) { 1093 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1094 (*port_fwd_end)(fwd_ports_ids[i]); 1095 } 1096 } 1097 launch_packet_forwarding(start_pkt_forward_on_core); 1098 } 1099 1100 void 1101 stop_packet_forwarding(void) 1102 { 1103 struct rte_eth_stats stats; 1104 struct rte_port *port; 1105 port_fwd_end_t port_fwd_end; 1106 int i; 1107 portid_t pt_id; 1108 streamid_t sm_id; 1109 lcoreid_t lc_id; 1110 uint64_t total_recv; 1111 uint64_t total_xmit; 1112 uint64_t total_rx_dropped; 1113 uint64_t total_tx_dropped; 1114 uint64_t total_rx_nombuf; 1115 uint64_t tx_dropped; 1116 uint64_t rx_bad_ip_csum; 1117 uint64_t rx_bad_l4_csum; 1118 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1119 uint64_t fwd_cycles; 1120 #endif 1121 static const char *acc_stats_border = "+++++++++++++++"; 1122 1123 if (all_ports_started() == 0) { 1124 printf("Not all ports were started\n"); 1125 return; 1126 } 1127 if (test_done) { 1128 printf("Packet forwarding not started\n"); 1129 return; 1130 } 1131 printf("Telling cores to stop..."); 1132 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1133 fwd_lcores[lc_id]->stopped = 1; 1134 printf("\nWaiting for lcores to finish...\n"); 1135 rte_eal_mp_wait_lcore(); 1136 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1137 if (port_fwd_end != NULL) { 1138 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1139 pt_id = fwd_ports_ids[i]; 1140 (*port_fwd_end)(pt_id); 1141 } 1142 } 1143 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1144 fwd_cycles = 0; 1145 #endif 1146 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1147 if (cur_fwd_config.nb_fwd_streams > 1148 cur_fwd_config.nb_fwd_ports) { 1149 fwd_stream_stats_display(sm_id); 1150 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1151 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1152 } else { 1153 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1154 fwd_streams[sm_id]; 1155 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1156 fwd_streams[sm_id]; 1157 } 1158 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1159 tx_dropped = (uint64_t) (tx_dropped + 1160 fwd_streams[sm_id]->fwd_dropped); 1161 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1162 1163 rx_bad_ip_csum = 1164 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1165 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1166 fwd_streams[sm_id]->rx_bad_ip_csum); 1167 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1168 rx_bad_ip_csum; 1169 1170 rx_bad_l4_csum = 1171 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1172 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1173 fwd_streams[sm_id]->rx_bad_l4_csum); 1174 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1175 rx_bad_l4_csum; 1176 1177 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1178 fwd_cycles = (uint64_t) (fwd_cycles + 1179 fwd_streams[sm_id]->core_cycles); 1180 #endif 1181 } 1182 total_recv = 0; 1183 total_xmit = 0; 1184 total_rx_dropped = 0; 1185 total_tx_dropped = 0; 1186 total_rx_nombuf = 0; 1187 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1188 pt_id = fwd_ports_ids[i]; 1189 1190 port = &ports[pt_id]; 1191 rte_eth_stats_get(pt_id, &stats); 1192 stats.ipackets -= port->stats.ipackets; 1193 port->stats.ipackets = 0; 1194 stats.opackets -= port->stats.opackets; 1195 port->stats.opackets = 0; 1196 stats.ibytes -= port->stats.ibytes; 1197 port->stats.ibytes = 0; 1198 stats.obytes -= port->stats.obytes; 1199 port->stats.obytes = 0; 1200 stats.imissed -= port->stats.imissed; 1201 port->stats.imissed = 0; 1202 stats.oerrors -= port->stats.oerrors; 1203 port->stats.oerrors = 0; 1204 stats.rx_nombuf -= port->stats.rx_nombuf; 1205 port->stats.rx_nombuf = 0; 1206 stats.fdirmatch -= port->stats.fdirmatch; 1207 port->stats.rx_nombuf = 0; 1208 stats.fdirmiss -= port->stats.fdirmiss; 1209 port->stats.rx_nombuf = 0; 1210 1211 total_recv += stats.ipackets; 1212 total_xmit += stats.opackets; 1213 total_rx_dropped += stats.imissed; 1214 total_tx_dropped += port->tx_dropped; 1215 total_rx_nombuf += stats.rx_nombuf; 1216 1217 fwd_port_stats_display(pt_id, &stats); 1218 } 1219 printf("\n %s Accumulated forward statistics for all ports" 1220 "%s\n", 1221 acc_stats_border, acc_stats_border); 1222 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1223 "%-"PRIu64"\n" 1224 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1225 "%-"PRIu64"\n", 1226 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1227 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1228 if (total_rx_nombuf > 0) 1229 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1230 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1231 "%s\n", 1232 acc_stats_border, acc_stats_border); 1233 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1234 if (total_recv > 0) 1235 printf("\n CPU cycles/packet=%u (total cycles=" 1236 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1237 (unsigned int)(fwd_cycles / total_recv), 1238 fwd_cycles, total_recv); 1239 #endif 1240 printf("\nDone.\n"); 1241 test_done = 1; 1242 } 1243 1244 void 1245 dev_set_link_up(portid_t pid) 1246 { 1247 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1248 printf("\nSet link up fail.\n"); 1249 } 1250 1251 void 1252 dev_set_link_down(portid_t pid) 1253 { 1254 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1255 printf("\nSet link down fail.\n"); 1256 } 1257 1258 static int 1259 all_ports_started(void) 1260 { 1261 portid_t pi; 1262 struct rte_port *port; 1263 1264 FOREACH_PORT(pi, ports) { 1265 port = &ports[pi]; 1266 /* Check if there is a port which is not started */ 1267 if (port->port_status != RTE_PORT_STARTED) 1268 return 0; 1269 } 1270 1271 /* No port is not started */ 1272 return 1; 1273 } 1274 1275 int 1276 all_ports_stopped(void) 1277 { 1278 portid_t pi; 1279 struct rte_port *port; 1280 1281 FOREACH_PORT(pi, ports) { 1282 port = &ports[pi]; 1283 if (port->port_status != RTE_PORT_STOPPED) 1284 return 0; 1285 } 1286 1287 return 1; 1288 } 1289 1290 int 1291 port_is_started(portid_t port_id) 1292 { 1293 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1294 return 0; 1295 1296 if (ports[port_id].port_status != RTE_PORT_STARTED) 1297 return 0; 1298 1299 return 1; 1300 } 1301 1302 static int 1303 port_is_closed(portid_t port_id) 1304 { 1305 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1306 return 0; 1307 1308 if (ports[port_id].port_status != RTE_PORT_CLOSED) 1309 return 0; 1310 1311 return 1; 1312 } 1313 1314 int 1315 start_port(portid_t pid) 1316 { 1317 int diag, need_check_link_status = -1; 1318 portid_t pi; 1319 queueid_t qi; 1320 struct rte_port *port; 1321 struct ether_addr mac_addr; 1322 1323 if (test_done == 0) { 1324 printf("Please stop forwarding first\n"); 1325 return -1; 1326 } 1327 1328 if (port_id_is_invalid(pid, ENABLED_WARN)) 1329 return 0; 1330 1331 if (init_fwd_streams() < 0) { 1332 printf("Fail from init_fwd_streams()\n"); 1333 return -1; 1334 } 1335 1336 if(dcb_config) 1337 dcb_test = 1; 1338 FOREACH_PORT(pi, ports) { 1339 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1340 continue; 1341 1342 need_check_link_status = 0; 1343 port = &ports[pi]; 1344 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1345 RTE_PORT_HANDLING) == 0) { 1346 printf("Port %d is now not stopped\n", pi); 1347 continue; 1348 } 1349 1350 if (port->need_reconfig > 0) { 1351 port->need_reconfig = 0; 1352 1353 printf("Configuring Port %d (socket %u)\n", pi, 1354 port->socket_id); 1355 /* configure port */ 1356 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1357 &(port->dev_conf)); 1358 if (diag != 0) { 1359 if (rte_atomic16_cmpset(&(port->port_status), 1360 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1361 printf("Port %d can not be set back " 1362 "to stopped\n", pi); 1363 printf("Fail to configure port %d\n", pi); 1364 /* try to reconfigure port next time */ 1365 port->need_reconfig = 1; 1366 return -1; 1367 } 1368 } 1369 if (port->need_reconfig_queues > 0) { 1370 port->need_reconfig_queues = 0; 1371 /* setup tx queues */ 1372 for (qi = 0; qi < nb_txq; qi++) { 1373 if ((numa_support) && 1374 (txring_numa[pi] != NUMA_NO_CONFIG)) 1375 diag = rte_eth_tx_queue_setup(pi, qi, 1376 nb_txd,txring_numa[pi], 1377 &(port->tx_conf)); 1378 else 1379 diag = rte_eth_tx_queue_setup(pi, qi, 1380 nb_txd,port->socket_id, 1381 &(port->tx_conf)); 1382 1383 if (diag == 0) 1384 continue; 1385 1386 /* Fail to setup tx queue, return */ 1387 if (rte_atomic16_cmpset(&(port->port_status), 1388 RTE_PORT_HANDLING, 1389 RTE_PORT_STOPPED) == 0) 1390 printf("Port %d can not be set back " 1391 "to stopped\n", pi); 1392 printf("Fail to configure port %d tx queues\n", pi); 1393 /* try to reconfigure queues next time */ 1394 port->need_reconfig_queues = 1; 1395 return -1; 1396 } 1397 /* setup rx queues */ 1398 for (qi = 0; qi < nb_rxq; qi++) { 1399 if ((numa_support) && 1400 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1401 struct rte_mempool * mp = 1402 mbuf_pool_find(rxring_numa[pi]); 1403 if (mp == NULL) { 1404 printf("Failed to setup RX queue:" 1405 "No mempool allocation" 1406 "on the socket %d\n", 1407 rxring_numa[pi]); 1408 return -1; 1409 } 1410 1411 diag = rte_eth_rx_queue_setup(pi, qi, 1412 nb_rxd,rxring_numa[pi], 1413 &(port->rx_conf),mp); 1414 } 1415 else 1416 diag = rte_eth_rx_queue_setup(pi, qi, 1417 nb_rxd,port->socket_id, 1418 &(port->rx_conf), 1419 mbuf_pool_find(port->socket_id)); 1420 1421 if (diag == 0) 1422 continue; 1423 1424 1425 /* Fail to setup rx queue, return */ 1426 if (rte_atomic16_cmpset(&(port->port_status), 1427 RTE_PORT_HANDLING, 1428 RTE_PORT_STOPPED) == 0) 1429 printf("Port %d can not be set back " 1430 "to stopped\n", pi); 1431 printf("Fail to configure port %d rx queues\n", pi); 1432 /* try to reconfigure queues next time */ 1433 port->need_reconfig_queues = 1; 1434 return -1; 1435 } 1436 } 1437 /* start port */ 1438 if (rte_eth_dev_start(pi) < 0) { 1439 printf("Fail to start port %d\n", pi); 1440 1441 /* Fail to setup rx queue, return */ 1442 if (rte_atomic16_cmpset(&(port->port_status), 1443 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1444 printf("Port %d can not be set back to " 1445 "stopped\n", pi); 1446 continue; 1447 } 1448 1449 if (rte_atomic16_cmpset(&(port->port_status), 1450 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1451 printf("Port %d can not be set into started\n", pi); 1452 1453 rte_eth_macaddr_get(pi, &mac_addr); 1454 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1455 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1456 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1457 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1458 1459 /* at least one port started, need checking link status */ 1460 need_check_link_status = 1; 1461 } 1462 1463 if (need_check_link_status == 1 && !no_link_check) 1464 check_all_ports_link_status(RTE_PORT_ALL); 1465 else if (need_check_link_status == 0) 1466 printf("Please stop the ports first\n"); 1467 1468 printf("Done\n"); 1469 return 0; 1470 } 1471 1472 void 1473 stop_port(portid_t pid) 1474 { 1475 portid_t pi; 1476 struct rte_port *port; 1477 int need_check_link_status = 0; 1478 1479 if (test_done == 0) { 1480 printf("Please stop forwarding first\n"); 1481 return; 1482 } 1483 if (dcb_test) { 1484 dcb_test = 0; 1485 dcb_config = 0; 1486 } 1487 1488 if (port_id_is_invalid(pid, ENABLED_WARN)) 1489 return; 1490 1491 printf("Stopping ports...\n"); 1492 1493 FOREACH_PORT(pi, ports) { 1494 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1495 continue; 1496 1497 port = &ports[pi]; 1498 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1499 RTE_PORT_HANDLING) == 0) 1500 continue; 1501 1502 rte_eth_dev_stop(pi); 1503 1504 if (rte_atomic16_cmpset(&(port->port_status), 1505 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1506 printf("Port %d can not be set into stopped\n", pi); 1507 need_check_link_status = 1; 1508 } 1509 if (need_check_link_status && !no_link_check) 1510 check_all_ports_link_status(RTE_PORT_ALL); 1511 1512 printf("Done\n"); 1513 } 1514 1515 void 1516 close_port(portid_t pid) 1517 { 1518 portid_t pi; 1519 struct rte_port *port; 1520 1521 if (test_done == 0) { 1522 printf("Please stop forwarding first\n"); 1523 return; 1524 } 1525 1526 if (port_id_is_invalid(pid, ENABLED_WARN)) 1527 return; 1528 1529 printf("Closing ports...\n"); 1530 1531 FOREACH_PORT(pi, ports) { 1532 if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1533 continue; 1534 1535 port = &ports[pi]; 1536 if (rte_atomic16_cmpset(&(port->port_status), 1537 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1538 printf("Port %d is now not stopped\n", pi); 1539 continue; 1540 } 1541 1542 rte_eth_dev_close(pi); 1543 1544 if (rte_atomic16_cmpset(&(port->port_status), 1545 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1546 printf("Port %d can not be set into stopped\n", pi); 1547 } 1548 1549 printf("Done\n"); 1550 } 1551 1552 void 1553 attach_port(char *identifier) 1554 { 1555 portid_t i, j, pi = 0; 1556 1557 printf("Attaching a new port...\n"); 1558 1559 if (identifier == NULL) { 1560 printf("Invalid parameters are specified\n"); 1561 return; 1562 } 1563 1564 if (test_done == 0) { 1565 printf("Please stop forwarding first\n"); 1566 return; 1567 } 1568 1569 if (rte_eth_dev_attach(identifier, &pi)) 1570 return; 1571 1572 ports[pi].enabled = 1; 1573 reconfig(pi, rte_eth_dev_socket_id(pi)); 1574 rte_eth_promiscuous_enable(pi); 1575 1576 nb_ports = rte_eth_dev_count(); 1577 1578 /* set_default_fwd_ports_config(); */ 1579 bzero(fwd_ports_ids, sizeof(fwd_ports_ids)); 1580 i = 0; 1581 FOREACH_PORT(j, ports) { 1582 fwd_ports_ids[i] = j; 1583 i++; 1584 } 1585 nb_cfg_ports = nb_ports; 1586 nb_fwd_ports++; 1587 1588 ports[pi].port_status = RTE_PORT_STOPPED; 1589 1590 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1591 printf("Done\n"); 1592 } 1593 1594 void 1595 detach_port(uint8_t port_id) 1596 { 1597 portid_t i, pi = 0; 1598 char name[RTE_ETH_NAME_MAX_LEN]; 1599 1600 printf("Detaching a port...\n"); 1601 1602 if (!port_is_closed(port_id)) { 1603 printf("Please close port first\n"); 1604 return; 1605 } 1606 1607 rte_eth_promiscuous_disable(port_id); 1608 1609 if (rte_eth_dev_detach(port_id, name)) 1610 return; 1611 1612 ports[port_id].enabled = 0; 1613 nb_ports = rte_eth_dev_count(); 1614 1615 /* set_default_fwd_ports_config(); */ 1616 bzero(fwd_ports_ids, sizeof(fwd_ports_ids)); 1617 i = 0; 1618 FOREACH_PORT(pi, ports) { 1619 fwd_ports_ids[i] = pi; 1620 i++; 1621 } 1622 nb_cfg_ports = nb_ports; 1623 nb_fwd_ports--; 1624 1625 printf("Port '%s' is detached. Now total ports is %d\n", 1626 name, nb_ports); 1627 printf("Done\n"); 1628 return; 1629 } 1630 1631 void 1632 pmd_test_exit(void) 1633 { 1634 portid_t pt_id; 1635 1636 if (test_done == 0) 1637 stop_packet_forwarding(); 1638 1639 FOREACH_PORT(pt_id, ports) { 1640 printf("Stopping port %d...", pt_id); 1641 fflush(stdout); 1642 rte_eth_dev_close(pt_id); 1643 printf("done\n"); 1644 } 1645 printf("bye...\n"); 1646 } 1647 1648 typedef void (*cmd_func_t)(void); 1649 struct pmd_test_command { 1650 const char *cmd_name; 1651 cmd_func_t cmd_func; 1652 }; 1653 1654 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1655 1656 /* Check the link status of all ports in up to 9s, and print them finally */ 1657 static void 1658 check_all_ports_link_status(uint32_t port_mask) 1659 { 1660 #define CHECK_INTERVAL 100 /* 100ms */ 1661 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1662 uint8_t portid, count, all_ports_up, print_flag = 0; 1663 struct rte_eth_link link; 1664 1665 printf("Checking link statuses...\n"); 1666 fflush(stdout); 1667 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1668 all_ports_up = 1; 1669 FOREACH_PORT(portid, ports) { 1670 if ((port_mask & (1 << portid)) == 0) 1671 continue; 1672 memset(&link, 0, sizeof(link)); 1673 rte_eth_link_get_nowait(portid, &link); 1674 /* print link status if flag set */ 1675 if (print_flag == 1) { 1676 if (link.link_status) 1677 printf("Port %d Link Up - speed %u " 1678 "Mbps - %s\n", (uint8_t)portid, 1679 (unsigned)link.link_speed, 1680 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1681 ("full-duplex") : ("half-duplex\n")); 1682 else 1683 printf("Port %d Link Down\n", 1684 (uint8_t)portid); 1685 continue; 1686 } 1687 /* clear all_ports_up flag if any link down */ 1688 if (link.link_status == 0) { 1689 all_ports_up = 0; 1690 break; 1691 } 1692 } 1693 /* after finally printing all link status, get out */ 1694 if (print_flag == 1) 1695 break; 1696 1697 if (all_ports_up == 0) { 1698 fflush(stdout); 1699 rte_delay_ms(CHECK_INTERVAL); 1700 } 1701 1702 /* set the print_flag if all ports up or timeout */ 1703 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1704 print_flag = 1; 1705 } 1706 } 1707 } 1708 1709 static int 1710 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1711 { 1712 uint16_t i; 1713 int diag; 1714 uint8_t mapping_found = 0; 1715 1716 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1717 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1718 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1719 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1720 tx_queue_stats_mappings[i].queue_id, 1721 tx_queue_stats_mappings[i].stats_counter_id); 1722 if (diag != 0) 1723 return diag; 1724 mapping_found = 1; 1725 } 1726 } 1727 if (mapping_found) 1728 port->tx_queue_stats_mapping_enabled = 1; 1729 return 0; 1730 } 1731 1732 static int 1733 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1734 { 1735 uint16_t i; 1736 int diag; 1737 uint8_t mapping_found = 0; 1738 1739 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1740 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1741 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1742 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1743 rx_queue_stats_mappings[i].queue_id, 1744 rx_queue_stats_mappings[i].stats_counter_id); 1745 if (diag != 0) 1746 return diag; 1747 mapping_found = 1; 1748 } 1749 } 1750 if (mapping_found) 1751 port->rx_queue_stats_mapping_enabled = 1; 1752 return 0; 1753 } 1754 1755 static void 1756 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1757 { 1758 int diag = 0; 1759 1760 diag = set_tx_queue_stats_mapping_registers(pi, port); 1761 if (diag != 0) { 1762 if (diag == -ENOTSUP) { 1763 port->tx_queue_stats_mapping_enabled = 0; 1764 printf("TX queue stats mapping not supported port id=%d\n", pi); 1765 } 1766 else 1767 rte_exit(EXIT_FAILURE, 1768 "set_tx_queue_stats_mapping_registers " 1769 "failed for port id=%d diag=%d\n", 1770 pi, diag); 1771 } 1772 1773 diag = set_rx_queue_stats_mapping_registers(pi, port); 1774 if (diag != 0) { 1775 if (diag == -ENOTSUP) { 1776 port->rx_queue_stats_mapping_enabled = 0; 1777 printf("RX queue stats mapping not supported port id=%d\n", pi); 1778 } 1779 else 1780 rte_exit(EXIT_FAILURE, 1781 "set_rx_queue_stats_mapping_registers " 1782 "failed for port id=%d diag=%d\n", 1783 pi, diag); 1784 } 1785 } 1786 1787 static void 1788 rxtx_port_config(struct rte_port *port) 1789 { 1790 port->rx_conf = port->dev_info.default_rxconf; 1791 port->tx_conf = port->dev_info.default_txconf; 1792 1793 /* Check if any RX/TX parameters have been passed */ 1794 if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1795 port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1796 1797 if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1798 port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1799 1800 if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1801 port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1802 1803 if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1804 port->rx_conf.rx_free_thresh = rx_free_thresh; 1805 1806 if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1807 port->rx_conf.rx_drop_en = rx_drop_en; 1808 1809 if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1810 port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1811 1812 if (tx_hthresh != RTE_PMD_PARAM_UNSET) 1813 port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1814 1815 if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1816 port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1817 1818 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1819 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1820 1821 if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1822 port->tx_conf.tx_free_thresh = tx_free_thresh; 1823 1824 if (txq_flags != RTE_PMD_PARAM_UNSET) 1825 port->tx_conf.txq_flags = txq_flags; 1826 } 1827 1828 void 1829 init_port_config(void) 1830 { 1831 portid_t pid; 1832 struct rte_port *port; 1833 1834 FOREACH_PORT(pid, ports) { 1835 port = &ports[pid]; 1836 port->dev_conf.rxmode = rx_mode; 1837 port->dev_conf.fdir_conf = fdir_conf; 1838 if (nb_rxq > 1) { 1839 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1840 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1841 } else { 1842 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1843 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1844 } 1845 1846 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1847 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1848 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1849 else 1850 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1851 } 1852 1853 if (port->dev_info.max_vfs != 0) { 1854 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1855 port->dev_conf.rxmode.mq_mode = 1856 ETH_MQ_RX_VMDQ_RSS; 1857 else 1858 port->dev_conf.rxmode.mq_mode = 1859 ETH_MQ_RX_NONE; 1860 1861 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE; 1862 } 1863 1864 rxtx_port_config(port); 1865 1866 rte_eth_macaddr_get(pid, &port->eth_addr); 1867 1868 map_port_queue_stats_mapping_registers(pid, port); 1869 #ifdef RTE_NIC_BYPASS 1870 rte_eth_dev_bypass_init(pid); 1871 #endif 1872 } 1873 } 1874 1875 const uint16_t vlan_tags[] = { 1876 0, 1, 2, 3, 4, 5, 6, 7, 1877 8, 9, 10, 11, 12, 13, 14, 15, 1878 16, 17, 18, 19, 20, 21, 22, 23, 1879 24, 25, 26, 27, 28, 29, 30, 31 1880 }; 1881 1882 static int 1883 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1884 { 1885 uint8_t i; 1886 1887 /* 1888 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1889 * given above, and the number of traffic classes available for use. 1890 */ 1891 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1892 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1893 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1894 1895 /* VMDQ+DCB RX and TX configrations */ 1896 vmdq_rx_conf.enable_default_pool = 0; 1897 vmdq_rx_conf.default_pool = 0; 1898 vmdq_rx_conf.nb_queue_pools = 1899 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1900 vmdq_tx_conf.nb_queue_pools = 1901 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1902 1903 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1904 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1905 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1906 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1907 } 1908 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1909 vmdq_rx_conf.dcb_queue[i] = i; 1910 vmdq_tx_conf.dcb_queue[i] = i; 1911 } 1912 1913 /*set DCB mode of RX and TX of multiple queues*/ 1914 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1915 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1916 if (dcb_conf->pfc_en) 1917 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1918 else 1919 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1920 1921 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1922 sizeof(struct rte_eth_vmdq_dcb_conf))); 1923 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1924 sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1925 } 1926 else { 1927 struct rte_eth_dcb_rx_conf rx_conf; 1928 struct rte_eth_dcb_tx_conf tx_conf; 1929 1930 /* queue mapping configuration of DCB RX and TX */ 1931 if (dcb_conf->num_tcs == ETH_4_TCS) 1932 dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1933 else 1934 dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1935 1936 rx_conf.nb_tcs = dcb_conf->num_tcs; 1937 tx_conf.nb_tcs = dcb_conf->num_tcs; 1938 1939 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1940 rx_conf.dcb_queue[i] = i; 1941 tx_conf.dcb_queue[i] = i; 1942 } 1943 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 1944 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1945 if (dcb_conf->pfc_en) 1946 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1947 else 1948 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1949 1950 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1951 sizeof(struct rte_eth_dcb_rx_conf))); 1952 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1953 sizeof(struct rte_eth_dcb_tx_conf))); 1954 } 1955 1956 return 0; 1957 } 1958 1959 int 1960 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1961 { 1962 struct rte_eth_conf port_conf; 1963 struct rte_port *rte_port; 1964 int retval; 1965 uint16_t nb_vlan; 1966 uint16_t i; 1967 1968 /* rxq and txq configuration in dcb mode */ 1969 nb_rxq = 128; 1970 nb_txq = 128; 1971 rx_free_thresh = 64; 1972 1973 memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1974 /* Enter DCB configuration status */ 1975 dcb_config = 1; 1976 1977 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1978 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1979 retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1980 if (retval < 0) 1981 return retval; 1982 1983 rte_port = &ports[pid]; 1984 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1985 1986 rxtx_port_config(rte_port); 1987 /* VLAN filter */ 1988 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1989 for (i = 0; i < nb_vlan; i++){ 1990 rx_vft_set(pid, vlan_tags[i], 1); 1991 } 1992 1993 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1994 map_port_queue_stats_mapping_registers(pid, rte_port); 1995 1996 rte_port->dcb_flag = 1; 1997 1998 return 0; 1999 } 2000 2001 static void 2002 init_port(void) 2003 { 2004 portid_t pid; 2005 2006 /* Configuration of Ethernet ports. */ 2007 ports = rte_zmalloc("testpmd: ports", 2008 sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2009 RTE_CACHE_LINE_SIZE); 2010 if (ports == NULL) { 2011 rte_exit(EXIT_FAILURE, 2012 "rte_zmalloc(%d struct rte_port) failed\n", 2013 RTE_MAX_ETHPORTS); 2014 } 2015 2016 /* enabled allocated ports */ 2017 for (pid = 0; pid < nb_ports; pid++) 2018 ports[pid].enabled = 1; 2019 } 2020 2021 int 2022 main(int argc, char** argv) 2023 { 2024 int diag; 2025 uint8_t port_id; 2026 2027 diag = rte_eal_init(argc, argv); 2028 if (diag < 0) 2029 rte_panic("Cannot init EAL\n"); 2030 2031 nb_ports = (portid_t) rte_eth_dev_count(); 2032 if (nb_ports == 0) 2033 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2034 2035 /* allocate port structures, and init them */ 2036 init_port(); 2037 2038 set_def_fwd_config(); 2039 if (nb_lcores == 0) 2040 rte_panic("Empty set of forwarding logical cores - check the " 2041 "core mask supplied in the command parameters\n"); 2042 2043 argc -= diag; 2044 argv += diag; 2045 if (argc > 1) 2046 launch_args_parse(argc, argv); 2047 2048 if (nb_rxq > nb_txq) 2049 printf("Warning: nb_rxq=%d enables RSS configuration, " 2050 "but nb_txq=%d will prevent to fully test it.\n", 2051 nb_rxq, nb_txq); 2052 2053 init_config(); 2054 if (start_port(RTE_PORT_ALL) != 0) 2055 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2056 2057 /* set all ports to promiscuous mode by default */ 2058 FOREACH_PORT(port_id, ports) 2059 rte_eth_promiscuous_enable(port_id); 2060 2061 #ifdef RTE_LIBRTE_CMDLINE 2062 if (interactive == 1) { 2063 if (auto_start) { 2064 printf("Start automatic packet forwarding\n"); 2065 start_packet_forwarding(0); 2066 } 2067 prompt(); 2068 } else 2069 #endif 2070 { 2071 char c; 2072 int rc; 2073 2074 printf("No commandline core given, start packet forwarding\n"); 2075 start_packet_forwarding(0); 2076 printf("Press enter to exit\n"); 2077 rc = read(0, &c, 1); 2078 if (rc < 0) 2079 return 1; 2080 } 2081 2082 return 0; 2083 } 2084