1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_byteorder.h> 53 #include <rte_log.h> 54 #include <rte_debug.h> 55 #include <rte_cycles.h> 56 #include <rte_memory.h> 57 #include <rte_memcpy.h> 58 #include <rte_memzone.h> 59 #include <rte_launch.h> 60 #include <rte_tailq.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_ring.h> 67 #include <rte_mempool.h> 68 #include <rte_malloc.h> 69 #include <rte_mbuf.h> 70 #include <rte_interrupts.h> 71 #include <rte_pci.h> 72 #include <rte_ether.h> 73 #include <rte_ethdev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 79 #include "testpmd.h" 80 #include "mempool_osdep.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 178 179 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180 uint8_t dcb_config = 0; 181 182 /* Whether the dcb is in testing status */ 183 uint8_t dcb_test = 0; 184 185 /* DCB on and VT on mapping is default */ 186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 /* 203 * Configurable values of RX and TX ring threshold registers. 204 */ 205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ 206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ 207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */ 208 209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */ 210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ 211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ 212 213 struct rte_eth_thresh rx_thresh = { 214 .pthresh = RX_PTHRESH, 215 .hthresh = RX_HTHRESH, 216 .wthresh = RX_WTHRESH, 217 }; 218 219 struct rte_eth_thresh tx_thresh = { 220 .pthresh = TX_PTHRESH, 221 .hthresh = TX_HTHRESH, 222 .wthresh = TX_WTHRESH, 223 }; 224 225 /* 226 * Configurable value of RX free threshold. 227 */ 228 uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets, 229 This setting is needed for ixgbe to enable bulk alloc or vector 230 receive functionality. */ 231 232 /* 233 * Configurable value of RX drop enable. 234 */ 235 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */ 236 237 /* 238 * Configurable value of TX free threshold. 239 */ 240 uint16_t tx_free_thresh = 0; /* Use default values. */ 241 242 /* 243 * Configurable value of TX RS bit threshold. 244 */ 245 uint16_t tx_rs_thresh = 0; /* Use default values. */ 246 247 /* 248 * Configurable value of TX queue flags. 249 */ 250 uint32_t txq_flags = 0; /* No flags set. */ 251 252 /* 253 * Receive Side Scaling (RSS) configuration. 254 */ 255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 256 257 /* 258 * Port topology configuration 259 */ 260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 261 262 /* 263 * Avoids to flush all the RX streams before starts forwarding. 264 */ 265 uint8_t no_flush_rx = 0; /* flush by default */ 266 267 /* 268 * Avoids to check link status when starting/stopping a port. 269 */ 270 uint8_t no_link_check = 0; /* check by default */ 271 272 /* 273 * NIC bypass mode configuration options. 274 */ 275 #ifdef RTE_NIC_BYPASS 276 277 /* The NIC bypass watchdog timeout. */ 278 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 279 280 #endif 281 282 /* 283 * Ethernet device configuration. 284 */ 285 struct rte_eth_rxmode rx_mode = { 286 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 287 .split_hdr_size = 0, 288 .header_split = 0, /**< Header Split disabled. */ 289 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 290 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 291 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 292 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 293 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 294 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 295 }; 296 297 struct rte_fdir_conf fdir_conf = { 298 .mode = RTE_FDIR_MODE_NONE, 299 .pballoc = RTE_FDIR_PBALLOC_64K, 300 .status = RTE_FDIR_REPORT_STATUS, 301 .flexbytes_offset = 0x6, 302 .drop_queue = 127, 303 }; 304 305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 306 307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 309 310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 312 313 uint16_t nb_tx_queue_stats_mappings = 0; 314 uint16_t nb_rx_queue_stats_mappings = 0; 315 316 /* Forward function declarations */ 317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 318 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask); 319 320 /* 321 * Check if all the ports are started. 322 * If yes, return positive value. If not, return zero. 323 */ 324 static int all_ports_started(void); 325 326 /* 327 * Setup default configuration. 328 */ 329 static void 330 set_default_fwd_lcores_config(void) 331 { 332 unsigned int i; 333 unsigned int nb_lc; 334 335 nb_lc = 0; 336 for (i = 0; i < RTE_MAX_LCORE; i++) { 337 if (! rte_lcore_is_enabled(i)) 338 continue; 339 if (i == rte_get_master_lcore()) 340 continue; 341 fwd_lcores_cpuids[nb_lc++] = i; 342 } 343 nb_lcores = (lcoreid_t) nb_lc; 344 nb_cfg_lcores = nb_lcores; 345 nb_fwd_lcores = 1; 346 } 347 348 static void 349 set_def_peer_eth_addrs(void) 350 { 351 portid_t i; 352 353 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 354 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 355 peer_eth_addrs[i].addr_bytes[5] = i; 356 } 357 } 358 359 static void 360 set_default_fwd_ports_config(void) 361 { 362 portid_t pt_id; 363 364 for (pt_id = 0; pt_id < nb_ports; pt_id++) 365 fwd_ports_ids[pt_id] = pt_id; 366 367 nb_cfg_ports = nb_ports; 368 nb_fwd_ports = nb_ports; 369 } 370 371 void 372 set_def_fwd_config(void) 373 { 374 set_default_fwd_lcores_config(); 375 set_def_peer_eth_addrs(); 376 set_default_fwd_ports_config(); 377 } 378 379 /* 380 * Configuration initialisation done once at init time. 381 */ 382 struct mbuf_ctor_arg { 383 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 384 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 385 }; 386 387 struct mbuf_pool_ctor_arg { 388 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 389 }; 390 391 static void 392 testpmd_mbuf_ctor(struct rte_mempool *mp, 393 void *opaque_arg, 394 void *raw_mbuf, 395 __attribute__((unused)) unsigned i) 396 { 397 struct mbuf_ctor_arg *mb_ctor_arg; 398 struct rte_mbuf *mb; 399 400 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 401 mb = (struct rte_mbuf *) raw_mbuf; 402 403 mb->pool = mp; 404 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 405 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 406 mb_ctor_arg->seg_buf_offset); 407 mb->buf_len = mb_ctor_arg->seg_buf_size; 408 mb->ol_flags = 0; 409 mb->data_off = RTE_PKTMBUF_HEADROOM; 410 mb->nb_segs = 1; 411 mb->l2_l3_len = 0; 412 mb->vlan_tci = 0; 413 mb->hash.rss = 0; 414 } 415 416 static void 417 testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 418 void *opaque_arg) 419 { 420 struct mbuf_pool_ctor_arg *mbp_ctor_arg; 421 struct rte_pktmbuf_pool_private *mbp_priv; 422 423 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 424 printf("%s(%s) private_data_size %d < %d\n", 425 __func__, mp->name, (int) mp->private_data_size, 426 (int) sizeof(struct rte_pktmbuf_pool_private)); 427 return; 428 } 429 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 430 mbp_priv = rte_mempool_get_priv(mp); 431 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 432 } 433 434 static void 435 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 436 unsigned int socket_id) 437 { 438 char pool_name[RTE_MEMPOOL_NAMESIZE]; 439 struct rte_mempool *rte_mp; 440 struct mbuf_pool_ctor_arg mbp_ctor_arg; 441 struct mbuf_ctor_arg mb_ctor_arg; 442 uint32_t mb_size; 443 444 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 445 mbuf_seg_size); 446 mb_ctor_arg.seg_buf_offset = 447 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 448 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 449 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 450 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 451 452 #ifdef RTE_LIBRTE_PMD_XENVIRT 453 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 454 (unsigned) mb_mempool_cache, 455 sizeof(struct rte_pktmbuf_pool_private), 456 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 457 testpmd_mbuf_ctor, &mb_ctor_arg, 458 socket_id, 0); 459 460 461 462 #else 463 if (mp_anon != 0) 464 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 465 (unsigned) mb_mempool_cache, 466 sizeof(struct rte_pktmbuf_pool_private), 467 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 468 testpmd_mbuf_ctor, &mb_ctor_arg, 469 socket_id, 0); 470 else 471 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 472 (unsigned) mb_mempool_cache, 473 sizeof(struct rte_pktmbuf_pool_private), 474 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 475 testpmd_mbuf_ctor, &mb_ctor_arg, 476 socket_id, 0); 477 478 #endif 479 480 if (rte_mp == NULL) { 481 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 482 "failed\n", socket_id); 483 } else if (verbose_level > 0) { 484 rte_mempool_dump(stdout, rte_mp); 485 } 486 } 487 488 /* 489 * Check given socket id is valid or not with NUMA mode, 490 * if valid, return 0, else return -1 491 */ 492 static int 493 check_socket_id(const unsigned int socket_id) 494 { 495 static int warning_once = 0; 496 497 if (socket_id >= MAX_SOCKET) { 498 if (!warning_once && numa_support) 499 printf("Warning: NUMA should be configured manually by" 500 " using --port-numa-config and" 501 " --ring-numa-config parameters along with" 502 " --numa.\n"); 503 warning_once = 1; 504 return -1; 505 } 506 return 0; 507 } 508 509 static void 510 init_config(void) 511 { 512 portid_t pid; 513 struct rte_port *port; 514 struct rte_mempool *mbp; 515 unsigned int nb_mbuf_per_pool; 516 lcoreid_t lc_id; 517 uint8_t port_per_socket[MAX_SOCKET]; 518 519 memset(port_per_socket,0,MAX_SOCKET); 520 /* Configuration of logical cores. */ 521 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 522 sizeof(struct fwd_lcore *) * nb_lcores, 523 CACHE_LINE_SIZE); 524 if (fwd_lcores == NULL) { 525 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 526 "failed\n", nb_lcores); 527 } 528 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 529 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 530 sizeof(struct fwd_lcore), 531 CACHE_LINE_SIZE); 532 if (fwd_lcores[lc_id] == NULL) { 533 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 534 "failed\n"); 535 } 536 fwd_lcores[lc_id]->cpuid_idx = lc_id; 537 } 538 539 /* 540 * Create pools of mbuf. 541 * If NUMA support is disabled, create a single pool of mbuf in 542 * socket 0 memory by default. 543 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 544 * 545 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 546 * nb_txd can be configured at run time. 547 */ 548 if (param_total_num_mbufs) 549 nb_mbuf_per_pool = param_total_num_mbufs; 550 else { 551 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 552 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 553 554 if (!numa_support) 555 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); 556 } 557 558 if (!numa_support) { 559 if (socket_num == UMA_NO_CONFIG) 560 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 561 else 562 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 563 socket_num); 564 } 565 566 /* Configuration of Ethernet ports. */ 567 ports = rte_zmalloc("testpmd: ports", 568 sizeof(struct rte_port) * nb_ports, 569 CACHE_LINE_SIZE); 570 if (ports == NULL) { 571 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) " 572 "failed\n", nb_ports); 573 } 574 575 for (pid = 0; pid < nb_ports; pid++) { 576 port = &ports[pid]; 577 rte_eth_dev_info_get(pid, &port->dev_info); 578 579 if (numa_support) { 580 if (port_numa[pid] != NUMA_NO_CONFIG) 581 port_per_socket[port_numa[pid]]++; 582 else { 583 uint32_t socket_id = rte_eth_dev_socket_id(pid); 584 585 /* if socket_id is invalid, set to 0 */ 586 if (check_socket_id(socket_id) < 0) 587 socket_id = 0; 588 port_per_socket[socket_id]++; 589 } 590 } 591 592 /* set flag to initialize port/queue */ 593 port->need_reconfig = 1; 594 port->need_reconfig_queues = 1; 595 } 596 597 if (numa_support) { 598 uint8_t i; 599 unsigned int nb_mbuf; 600 601 if (param_total_num_mbufs) 602 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 603 604 for (i = 0; i < MAX_SOCKET; i++) { 605 nb_mbuf = (nb_mbuf_per_pool * 606 port_per_socket[i]); 607 if (nb_mbuf) 608 mbuf_pool_create(mbuf_data_size, 609 nb_mbuf,i); 610 } 611 } 612 init_port_config(); 613 614 /* 615 * Records which Mbuf pool to use by each logical core, if needed. 616 */ 617 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 618 mbp = mbuf_pool_find( 619 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 620 621 if (mbp == NULL) 622 mbp = mbuf_pool_find(0); 623 fwd_lcores[lc_id]->mbp = mbp; 624 } 625 626 /* Configuration of packet forwarding streams. */ 627 if (init_fwd_streams() < 0) 628 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 629 } 630 631 632 void 633 reconfig(portid_t new_port_id, unsigned socket_id) 634 { 635 struct rte_port *port; 636 637 /* Reconfiguration of Ethernet ports. */ 638 ports = rte_realloc(ports, 639 sizeof(struct rte_port) * nb_ports, 640 CACHE_LINE_SIZE); 641 if (ports == NULL) { 642 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n", 643 nb_ports); 644 } 645 646 port = &ports[new_port_id]; 647 rte_eth_dev_info_get(new_port_id, &port->dev_info); 648 649 /* set flag to initialize port/queue */ 650 port->need_reconfig = 1; 651 port->need_reconfig_queues = 1; 652 port->socket_id = socket_id; 653 654 init_port_config(); 655 } 656 657 658 int 659 init_fwd_streams(void) 660 { 661 portid_t pid; 662 struct rte_port *port; 663 streamid_t sm_id, nb_fwd_streams_new; 664 665 /* set socket id according to numa or not */ 666 for (pid = 0; pid < nb_ports; pid++) { 667 port = &ports[pid]; 668 if (nb_rxq > port->dev_info.max_rx_queues) { 669 printf("Fail: nb_rxq(%d) is greater than " 670 "max_rx_queues(%d)\n", nb_rxq, 671 port->dev_info.max_rx_queues); 672 return -1; 673 } 674 if (nb_txq > port->dev_info.max_tx_queues) { 675 printf("Fail: nb_txq(%d) is greater than " 676 "max_tx_queues(%d)\n", nb_txq, 677 port->dev_info.max_tx_queues); 678 return -1; 679 } 680 if (numa_support) { 681 if (port_numa[pid] != NUMA_NO_CONFIG) 682 port->socket_id = port_numa[pid]; 683 else { 684 port->socket_id = rte_eth_dev_socket_id(pid); 685 686 /* if socket_id is invalid, set to 0 */ 687 if (check_socket_id(port->socket_id) < 0) 688 port->socket_id = 0; 689 } 690 } 691 else { 692 if (socket_num == UMA_NO_CONFIG) 693 port->socket_id = 0; 694 else 695 port->socket_id = socket_num; 696 } 697 } 698 699 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 700 if (nb_fwd_streams_new == nb_fwd_streams) 701 return 0; 702 /* clear the old */ 703 if (fwd_streams != NULL) { 704 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 705 if (fwd_streams[sm_id] == NULL) 706 continue; 707 rte_free(fwd_streams[sm_id]); 708 fwd_streams[sm_id] = NULL; 709 } 710 rte_free(fwd_streams); 711 fwd_streams = NULL; 712 } 713 714 /* init new */ 715 nb_fwd_streams = nb_fwd_streams_new; 716 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 717 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE); 718 if (fwd_streams == NULL) 719 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 720 "failed\n", nb_fwd_streams); 721 722 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 723 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 724 sizeof(struct fwd_stream), CACHE_LINE_SIZE); 725 if (fwd_streams[sm_id] == NULL) 726 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 727 " failed\n"); 728 } 729 730 return 0; 731 } 732 733 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 734 static void 735 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 736 { 737 unsigned int total_burst; 738 unsigned int nb_burst; 739 unsigned int burst_stats[3]; 740 uint16_t pktnb_stats[3]; 741 uint16_t nb_pkt; 742 int burst_percent[3]; 743 744 /* 745 * First compute the total number of packet bursts and the 746 * two highest numbers of bursts of the same number of packets. 747 */ 748 total_burst = 0; 749 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 750 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 751 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 752 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 753 if (nb_burst == 0) 754 continue; 755 total_burst += nb_burst; 756 if (nb_burst > burst_stats[0]) { 757 burst_stats[1] = burst_stats[0]; 758 pktnb_stats[1] = pktnb_stats[0]; 759 burst_stats[0] = nb_burst; 760 pktnb_stats[0] = nb_pkt; 761 } 762 } 763 if (total_burst == 0) 764 return; 765 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 766 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 767 burst_percent[0], (int) pktnb_stats[0]); 768 if (burst_stats[0] == total_burst) { 769 printf("]\n"); 770 return; 771 } 772 if (burst_stats[0] + burst_stats[1] == total_burst) { 773 printf(" + %d%% of %d pkts]\n", 774 100 - burst_percent[0], pktnb_stats[1]); 775 return; 776 } 777 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 778 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 779 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 780 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 781 return; 782 } 783 printf(" + %d%% of %d pkts + %d%% of others]\n", 784 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 785 } 786 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 787 788 static void 789 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 790 { 791 struct rte_port *port; 792 uint8_t i; 793 794 static const char *fwd_stats_border = "----------------------"; 795 796 port = &ports[port_id]; 797 printf("\n %s Forward statistics for port %-2d %s\n", 798 fwd_stats_border, port_id, fwd_stats_border); 799 800 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 801 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 802 "%-"PRIu64"\n", 803 stats->ipackets, stats->imissed, 804 (uint64_t) (stats->ipackets + stats->imissed)); 805 806 if (cur_fwd_eng == &csum_fwd_engine) 807 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 808 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 809 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 810 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64 811 "RX-error: %-"PRIu64"\n", 812 stats->ibadcrc, stats->ibadlen, stats->ierrors); 813 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 814 } 815 816 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 817 "%-"PRIu64"\n", 818 stats->opackets, port->tx_dropped, 819 (uint64_t) (stats->opackets + port->tx_dropped)); 820 } 821 else { 822 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 823 "%14"PRIu64"\n", 824 stats->ipackets, stats->imissed, 825 (uint64_t) (stats->ipackets + stats->imissed)); 826 827 if (cur_fwd_eng == &csum_fwd_engine) 828 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 829 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 830 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 831 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64 832 " RX-error:%"PRIu64"\n", 833 stats->ibadcrc, stats->ibadlen, stats->ierrors); 834 printf(" RX-nombufs: %14"PRIu64"\n", 835 stats->rx_nombuf); 836 } 837 838 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 839 "%14"PRIu64"\n", 840 stats->opackets, port->tx_dropped, 841 (uint64_t) (stats->opackets + port->tx_dropped)); 842 } 843 844 /* Display statistics of XON/XOFF pause frames, if any. */ 845 if ((stats->tx_pause_xon | stats->rx_pause_xon | 846 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 847 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 848 stats->rx_pause_xoff, stats->rx_pause_xon); 849 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 850 stats->tx_pause_xoff, stats->tx_pause_xon); 851 } 852 853 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 854 if (port->rx_stream) 855 pkt_burst_stats_display("RX", 856 &port->rx_stream->rx_burst_stats); 857 if (port->tx_stream) 858 pkt_burst_stats_display("TX", 859 &port->tx_stream->tx_burst_stats); 860 #endif 861 /* stats fdir */ 862 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 863 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 864 stats->fdirmiss, 865 stats->fdirmatch); 866 867 if (port->rx_queue_stats_mapping_enabled) { 868 printf("\n"); 869 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 870 printf(" Stats reg %2d RX-packets:%14"PRIu64 871 " RX-errors:%14"PRIu64 872 " RX-bytes:%14"PRIu64"\n", 873 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 874 } 875 printf("\n"); 876 } 877 if (port->tx_queue_stats_mapping_enabled) { 878 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 879 printf(" Stats reg %2d TX-packets:%14"PRIu64 880 " TX-bytes:%14"PRIu64"\n", 881 i, stats->q_opackets[i], stats->q_obytes[i]); 882 } 883 } 884 885 printf(" %s--------------------------------%s\n", 886 fwd_stats_border, fwd_stats_border); 887 } 888 889 static void 890 fwd_stream_stats_display(streamid_t stream_id) 891 { 892 struct fwd_stream *fs; 893 static const char *fwd_top_stats_border = "-------"; 894 895 fs = fwd_streams[stream_id]; 896 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 897 (fs->fwd_dropped == 0)) 898 return; 899 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 900 "TX Port=%2d/Queue=%2d %s\n", 901 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 902 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 903 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 904 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 905 906 /* if checksum mode */ 907 if (cur_fwd_eng == &csum_fwd_engine) { 908 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 909 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 910 } 911 912 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 913 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 914 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 915 #endif 916 } 917 918 static void 919 flush_fwd_rx_queues(void) 920 { 921 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 922 portid_t rxp; 923 portid_t port_id; 924 queueid_t rxq; 925 uint16_t nb_rx; 926 uint16_t i; 927 uint8_t j; 928 929 for (j = 0; j < 2; j++) { 930 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 931 for (rxq = 0; rxq < nb_rxq; rxq++) { 932 port_id = fwd_ports_ids[rxp]; 933 do { 934 nb_rx = rte_eth_rx_burst(port_id, rxq, 935 pkts_burst, MAX_PKT_BURST); 936 for (i = 0; i < nb_rx; i++) 937 rte_pktmbuf_free(pkts_burst[i]); 938 } while (nb_rx > 0); 939 } 940 } 941 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 942 } 943 } 944 945 static void 946 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 947 { 948 struct fwd_stream **fsm; 949 streamid_t nb_fs; 950 streamid_t sm_id; 951 952 fsm = &fwd_streams[fc->stream_idx]; 953 nb_fs = fc->stream_nb; 954 do { 955 for (sm_id = 0; sm_id < nb_fs; sm_id++) 956 (*pkt_fwd)(fsm[sm_id]); 957 } while (! fc->stopped); 958 } 959 960 static int 961 start_pkt_forward_on_core(void *fwd_arg) 962 { 963 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 964 cur_fwd_config.fwd_eng->packet_fwd); 965 return 0; 966 } 967 968 /* 969 * Run the TXONLY packet forwarding engine to send a single burst of packets. 970 * Used to start communication flows in network loopback test configurations. 971 */ 972 static int 973 run_one_txonly_burst_on_core(void *fwd_arg) 974 { 975 struct fwd_lcore *fwd_lc; 976 struct fwd_lcore tmp_lcore; 977 978 fwd_lc = (struct fwd_lcore *) fwd_arg; 979 tmp_lcore = *fwd_lc; 980 tmp_lcore.stopped = 1; 981 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 982 return 0; 983 } 984 985 /* 986 * Launch packet forwarding: 987 * - Setup per-port forwarding context. 988 * - launch logical cores with their forwarding configuration. 989 */ 990 static void 991 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 992 { 993 port_fwd_begin_t port_fwd_begin; 994 unsigned int i; 995 unsigned int lc_id; 996 int diag; 997 998 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 999 if (port_fwd_begin != NULL) { 1000 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1001 (*port_fwd_begin)(fwd_ports_ids[i]); 1002 } 1003 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1004 lc_id = fwd_lcores_cpuids[i]; 1005 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1006 fwd_lcores[i]->stopped = 0; 1007 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1008 fwd_lcores[i], lc_id); 1009 if (diag != 0) 1010 printf("launch lcore %u failed - diag=%d\n", 1011 lc_id, diag); 1012 } 1013 } 1014 } 1015 1016 /* 1017 * Launch packet forwarding configuration. 1018 */ 1019 void 1020 start_packet_forwarding(int with_tx_first) 1021 { 1022 port_fwd_begin_t port_fwd_begin; 1023 port_fwd_end_t port_fwd_end; 1024 struct rte_port *port; 1025 unsigned int i; 1026 portid_t pt_id; 1027 streamid_t sm_id; 1028 1029 if (all_ports_started() == 0) { 1030 printf("Not all ports were started\n"); 1031 return; 1032 } 1033 if (test_done == 0) { 1034 printf("Packet forwarding already started\n"); 1035 return; 1036 } 1037 if(dcb_test) { 1038 for (i = 0; i < nb_fwd_ports; i++) { 1039 pt_id = fwd_ports_ids[i]; 1040 port = &ports[pt_id]; 1041 if (!port->dcb_flag) { 1042 printf("In DCB mode, all forwarding ports must " 1043 "be configured in this mode.\n"); 1044 return; 1045 } 1046 } 1047 if (nb_fwd_lcores == 1) { 1048 printf("In DCB mode,the nb forwarding cores " 1049 "should be larger than 1.\n"); 1050 return; 1051 } 1052 } 1053 test_done = 0; 1054 1055 if(!no_flush_rx) 1056 flush_fwd_rx_queues(); 1057 1058 fwd_config_setup(); 1059 rxtx_config_display(); 1060 1061 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1062 pt_id = fwd_ports_ids[i]; 1063 port = &ports[pt_id]; 1064 rte_eth_stats_get(pt_id, &port->stats); 1065 port->tx_dropped = 0; 1066 1067 map_port_queue_stats_mapping_registers(pt_id, port); 1068 } 1069 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1070 fwd_streams[sm_id]->rx_packets = 0; 1071 fwd_streams[sm_id]->tx_packets = 0; 1072 fwd_streams[sm_id]->fwd_dropped = 0; 1073 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1074 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1075 1076 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1077 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1078 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1079 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1080 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1081 #endif 1082 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1083 fwd_streams[sm_id]->core_cycles = 0; 1084 #endif 1085 } 1086 if (with_tx_first) { 1087 port_fwd_begin = tx_only_engine.port_fwd_begin; 1088 if (port_fwd_begin != NULL) { 1089 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1090 (*port_fwd_begin)(fwd_ports_ids[i]); 1091 } 1092 launch_packet_forwarding(run_one_txonly_burst_on_core); 1093 rte_eal_mp_wait_lcore(); 1094 port_fwd_end = tx_only_engine.port_fwd_end; 1095 if (port_fwd_end != NULL) { 1096 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1097 (*port_fwd_end)(fwd_ports_ids[i]); 1098 } 1099 } 1100 launch_packet_forwarding(start_pkt_forward_on_core); 1101 } 1102 1103 void 1104 stop_packet_forwarding(void) 1105 { 1106 struct rte_eth_stats stats; 1107 struct rte_port *port; 1108 port_fwd_end_t port_fwd_end; 1109 int i; 1110 portid_t pt_id; 1111 streamid_t sm_id; 1112 lcoreid_t lc_id; 1113 uint64_t total_recv; 1114 uint64_t total_xmit; 1115 uint64_t total_rx_dropped; 1116 uint64_t total_tx_dropped; 1117 uint64_t total_rx_nombuf; 1118 uint64_t tx_dropped; 1119 uint64_t rx_bad_ip_csum; 1120 uint64_t rx_bad_l4_csum; 1121 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1122 uint64_t fwd_cycles; 1123 #endif 1124 static const char *acc_stats_border = "+++++++++++++++"; 1125 1126 if (all_ports_started() == 0) { 1127 printf("Not all ports were started\n"); 1128 return; 1129 } 1130 if (test_done) { 1131 printf("Packet forwarding not started\n"); 1132 return; 1133 } 1134 printf("Telling cores to stop..."); 1135 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1136 fwd_lcores[lc_id]->stopped = 1; 1137 printf("\nWaiting for lcores to finish...\n"); 1138 rte_eal_mp_wait_lcore(); 1139 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1140 if (port_fwd_end != NULL) { 1141 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1142 pt_id = fwd_ports_ids[i]; 1143 (*port_fwd_end)(pt_id); 1144 } 1145 } 1146 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1147 fwd_cycles = 0; 1148 #endif 1149 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1150 if (cur_fwd_config.nb_fwd_streams > 1151 cur_fwd_config.nb_fwd_ports) { 1152 fwd_stream_stats_display(sm_id); 1153 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1154 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1155 } else { 1156 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1157 fwd_streams[sm_id]; 1158 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1159 fwd_streams[sm_id]; 1160 } 1161 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1162 tx_dropped = (uint64_t) (tx_dropped + 1163 fwd_streams[sm_id]->fwd_dropped); 1164 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1165 1166 rx_bad_ip_csum = 1167 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1168 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1169 fwd_streams[sm_id]->rx_bad_ip_csum); 1170 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1171 rx_bad_ip_csum; 1172 1173 rx_bad_l4_csum = 1174 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1175 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1176 fwd_streams[sm_id]->rx_bad_l4_csum); 1177 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1178 rx_bad_l4_csum; 1179 1180 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1181 fwd_cycles = (uint64_t) (fwd_cycles + 1182 fwd_streams[sm_id]->core_cycles); 1183 #endif 1184 } 1185 total_recv = 0; 1186 total_xmit = 0; 1187 total_rx_dropped = 0; 1188 total_tx_dropped = 0; 1189 total_rx_nombuf = 0; 1190 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1191 pt_id = fwd_ports_ids[i]; 1192 1193 port = &ports[pt_id]; 1194 rte_eth_stats_get(pt_id, &stats); 1195 stats.ipackets -= port->stats.ipackets; 1196 port->stats.ipackets = 0; 1197 stats.opackets -= port->stats.opackets; 1198 port->stats.opackets = 0; 1199 stats.ibytes -= port->stats.ibytes; 1200 port->stats.ibytes = 0; 1201 stats.obytes -= port->stats.obytes; 1202 port->stats.obytes = 0; 1203 stats.imissed -= port->stats.imissed; 1204 port->stats.imissed = 0; 1205 stats.oerrors -= port->stats.oerrors; 1206 port->stats.oerrors = 0; 1207 stats.rx_nombuf -= port->stats.rx_nombuf; 1208 port->stats.rx_nombuf = 0; 1209 stats.fdirmatch -= port->stats.fdirmatch; 1210 port->stats.rx_nombuf = 0; 1211 stats.fdirmiss -= port->stats.fdirmiss; 1212 port->stats.rx_nombuf = 0; 1213 1214 total_recv += stats.ipackets; 1215 total_xmit += stats.opackets; 1216 total_rx_dropped += stats.imissed; 1217 total_tx_dropped += port->tx_dropped; 1218 total_rx_nombuf += stats.rx_nombuf; 1219 1220 fwd_port_stats_display(pt_id, &stats); 1221 } 1222 printf("\n %s Accumulated forward statistics for all ports" 1223 "%s\n", 1224 acc_stats_border, acc_stats_border); 1225 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1226 "%-"PRIu64"\n" 1227 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1228 "%-"PRIu64"\n", 1229 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1230 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1231 if (total_rx_nombuf > 0) 1232 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1233 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1234 "%s\n", 1235 acc_stats_border, acc_stats_border); 1236 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1237 if (total_recv > 0) 1238 printf("\n CPU cycles/packet=%u (total cycles=" 1239 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1240 (unsigned int)(fwd_cycles / total_recv), 1241 fwd_cycles, total_recv); 1242 #endif 1243 printf("\nDone.\n"); 1244 test_done = 1; 1245 } 1246 1247 void 1248 dev_set_link_up(portid_t pid) 1249 { 1250 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1251 printf("\nSet link up fail.\n"); 1252 } 1253 1254 void 1255 dev_set_link_down(portid_t pid) 1256 { 1257 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1258 printf("\nSet link down fail.\n"); 1259 } 1260 1261 static int 1262 all_ports_started(void) 1263 { 1264 portid_t pi; 1265 struct rte_port *port; 1266 1267 for (pi = 0; pi < nb_ports; pi++) { 1268 port = &ports[pi]; 1269 /* Check if there is a port which is not started */ 1270 if (port->port_status != RTE_PORT_STARTED) 1271 return 0; 1272 } 1273 1274 /* No port is not started */ 1275 return 1; 1276 } 1277 1278 int 1279 start_port(portid_t pid) 1280 { 1281 int diag, need_check_link_status = 0; 1282 portid_t pi; 1283 queueid_t qi; 1284 struct rte_port *port; 1285 struct ether_addr mac_addr; 1286 1287 if (test_done == 0) { 1288 printf("Please stop forwarding first\n"); 1289 return -1; 1290 } 1291 1292 if (init_fwd_streams() < 0) { 1293 printf("Fail from init_fwd_streams()\n"); 1294 return -1; 1295 } 1296 1297 if(dcb_config) 1298 dcb_test = 1; 1299 for (pi = 0; pi < nb_ports; pi++) { 1300 if (pid < nb_ports && pid != pi) 1301 continue; 1302 1303 port = &ports[pi]; 1304 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1305 RTE_PORT_HANDLING) == 0) { 1306 printf("Port %d is now not stopped\n", pi); 1307 continue; 1308 } 1309 1310 if (port->need_reconfig > 0) { 1311 port->need_reconfig = 0; 1312 1313 printf("Configuring Port %d (socket %u)\n", pi, 1314 port->socket_id); 1315 /* configure port */ 1316 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1317 &(port->dev_conf)); 1318 if (diag != 0) { 1319 if (rte_atomic16_cmpset(&(port->port_status), 1320 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1321 printf("Port %d can not be set back " 1322 "to stopped\n", pi); 1323 printf("Fail to configure port %d\n", pi); 1324 /* try to reconfigure port next time */ 1325 port->need_reconfig = 1; 1326 return -1; 1327 } 1328 } 1329 if (port->need_reconfig_queues > 0) { 1330 port->need_reconfig_queues = 0; 1331 /* setup tx queues */ 1332 for (qi = 0; qi < nb_txq; qi++) { 1333 if ((numa_support) && 1334 (txring_numa[pi] != NUMA_NO_CONFIG)) 1335 diag = rte_eth_tx_queue_setup(pi, qi, 1336 nb_txd,txring_numa[pi], 1337 &(port->tx_conf)); 1338 else 1339 diag = rte_eth_tx_queue_setup(pi, qi, 1340 nb_txd,port->socket_id, 1341 &(port->tx_conf)); 1342 1343 if (diag == 0) 1344 continue; 1345 1346 /* Fail to setup tx queue, return */ 1347 if (rte_atomic16_cmpset(&(port->port_status), 1348 RTE_PORT_HANDLING, 1349 RTE_PORT_STOPPED) == 0) 1350 printf("Port %d can not be set back " 1351 "to stopped\n", pi); 1352 printf("Fail to configure port %d tx queues\n", pi); 1353 /* try to reconfigure queues next time */ 1354 port->need_reconfig_queues = 1; 1355 return -1; 1356 } 1357 /* setup rx queues */ 1358 for (qi = 0; qi < nb_rxq; qi++) { 1359 if ((numa_support) && 1360 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1361 struct rte_mempool * mp = 1362 mbuf_pool_find(rxring_numa[pi]); 1363 if (mp == NULL) { 1364 printf("Failed to setup RX queue:" 1365 "No mempool allocation" 1366 "on the socket %d\n", 1367 rxring_numa[pi]); 1368 return -1; 1369 } 1370 1371 diag = rte_eth_rx_queue_setup(pi, qi, 1372 nb_rxd,rxring_numa[pi], 1373 &(port->rx_conf),mp); 1374 } 1375 else 1376 diag = rte_eth_rx_queue_setup(pi, qi, 1377 nb_rxd,port->socket_id, 1378 &(port->rx_conf), 1379 mbuf_pool_find(port->socket_id)); 1380 1381 if (diag == 0) 1382 continue; 1383 1384 1385 /* Fail to setup rx queue, return */ 1386 if (rte_atomic16_cmpset(&(port->port_status), 1387 RTE_PORT_HANDLING, 1388 RTE_PORT_STOPPED) == 0) 1389 printf("Port %d can not be set back " 1390 "to stopped\n", pi); 1391 printf("Fail to configure port %d rx queues\n", pi); 1392 /* try to reconfigure queues next time */ 1393 port->need_reconfig_queues = 1; 1394 return -1; 1395 } 1396 } 1397 /* start port */ 1398 if (rte_eth_dev_start(pi) < 0) { 1399 printf("Fail to start port %d\n", pi); 1400 1401 /* Fail to setup rx queue, return */ 1402 if (rte_atomic16_cmpset(&(port->port_status), 1403 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1404 printf("Port %d can not be set back to " 1405 "stopped\n", pi); 1406 continue; 1407 } 1408 1409 if (rte_atomic16_cmpset(&(port->port_status), 1410 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1411 printf("Port %d can not be set into started\n", pi); 1412 1413 rte_eth_macaddr_get(pi, &mac_addr); 1414 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1415 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1416 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1417 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1418 1419 /* at least one port started, need checking link status */ 1420 need_check_link_status = 1; 1421 } 1422 1423 if (need_check_link_status && !no_link_check) 1424 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1425 else 1426 printf("Please stop the ports first\n"); 1427 1428 printf("Done\n"); 1429 return 0; 1430 } 1431 1432 void 1433 stop_port(portid_t pid) 1434 { 1435 portid_t pi; 1436 struct rte_port *port; 1437 int need_check_link_status = 0; 1438 1439 if (test_done == 0) { 1440 printf("Please stop forwarding first\n"); 1441 return; 1442 } 1443 if (dcb_test) { 1444 dcb_test = 0; 1445 dcb_config = 0; 1446 } 1447 printf("Stopping ports...\n"); 1448 1449 for (pi = 0; pi < nb_ports; pi++) { 1450 if (pid < nb_ports && pid != pi) 1451 continue; 1452 1453 port = &ports[pi]; 1454 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1455 RTE_PORT_HANDLING) == 0) 1456 continue; 1457 1458 rte_eth_dev_stop(pi); 1459 1460 if (rte_atomic16_cmpset(&(port->port_status), 1461 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1462 printf("Port %d can not be set into stopped\n", pi); 1463 need_check_link_status = 1; 1464 } 1465 if (need_check_link_status && !no_link_check) 1466 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1467 1468 printf("Done\n"); 1469 } 1470 1471 void 1472 close_port(portid_t pid) 1473 { 1474 portid_t pi; 1475 struct rte_port *port; 1476 1477 if (test_done == 0) { 1478 printf("Please stop forwarding first\n"); 1479 return; 1480 } 1481 1482 printf("Closing ports...\n"); 1483 1484 for (pi = 0; pi < nb_ports; pi++) { 1485 if (pid < nb_ports && pid != pi) 1486 continue; 1487 1488 port = &ports[pi]; 1489 if (rte_atomic16_cmpset(&(port->port_status), 1490 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1491 printf("Port %d is now not stopped\n", pi); 1492 continue; 1493 } 1494 1495 rte_eth_dev_close(pi); 1496 1497 if (rte_atomic16_cmpset(&(port->port_status), 1498 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1499 printf("Port %d can not be set into stopped\n", pi); 1500 } 1501 1502 printf("Done\n"); 1503 } 1504 1505 int 1506 all_ports_stopped(void) 1507 { 1508 portid_t pi; 1509 struct rte_port *port; 1510 1511 for (pi = 0; pi < nb_ports; pi++) { 1512 port = &ports[pi]; 1513 if (port->port_status != RTE_PORT_STOPPED) 1514 return 0; 1515 } 1516 1517 return 1; 1518 } 1519 1520 int 1521 port_is_started(portid_t port_id) 1522 { 1523 if (port_id_is_invalid(port_id)) 1524 return -1; 1525 1526 if (ports[port_id].port_status != RTE_PORT_STARTED) 1527 return 0; 1528 1529 return 1; 1530 } 1531 1532 void 1533 pmd_test_exit(void) 1534 { 1535 portid_t pt_id; 1536 1537 for (pt_id = 0; pt_id < nb_ports; pt_id++) { 1538 printf("Stopping port %d...", pt_id); 1539 fflush(stdout); 1540 rte_eth_dev_close(pt_id); 1541 printf("done\n"); 1542 } 1543 printf("bye...\n"); 1544 } 1545 1546 typedef void (*cmd_func_t)(void); 1547 struct pmd_test_command { 1548 const char *cmd_name; 1549 cmd_func_t cmd_func; 1550 }; 1551 1552 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1553 1554 /* Check the link status of all ports in up to 9s, and print them finally */ 1555 static void 1556 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 1557 { 1558 #define CHECK_INTERVAL 100 /* 100ms */ 1559 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1560 uint8_t portid, count, all_ports_up, print_flag = 0; 1561 struct rte_eth_link link; 1562 1563 printf("Checking link statuses...\n"); 1564 fflush(stdout); 1565 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1566 all_ports_up = 1; 1567 for (portid = 0; portid < port_num; portid++) { 1568 if ((port_mask & (1 << portid)) == 0) 1569 continue; 1570 memset(&link, 0, sizeof(link)); 1571 rte_eth_link_get_nowait(portid, &link); 1572 /* print link status if flag set */ 1573 if (print_flag == 1) { 1574 if (link.link_status) 1575 printf("Port %d Link Up - speed %u " 1576 "Mbps - %s\n", (uint8_t)portid, 1577 (unsigned)link.link_speed, 1578 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1579 ("full-duplex") : ("half-duplex\n")); 1580 else 1581 printf("Port %d Link Down\n", 1582 (uint8_t)portid); 1583 continue; 1584 } 1585 /* clear all_ports_up flag if any link down */ 1586 if (link.link_status == 0) { 1587 all_ports_up = 0; 1588 break; 1589 } 1590 } 1591 /* after finally printing all link status, get out */ 1592 if (print_flag == 1) 1593 break; 1594 1595 if (all_ports_up == 0) { 1596 fflush(stdout); 1597 rte_delay_ms(CHECK_INTERVAL); 1598 } 1599 1600 /* set the print_flag if all ports up or timeout */ 1601 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1602 print_flag = 1; 1603 } 1604 } 1605 } 1606 1607 static int 1608 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1609 { 1610 uint16_t i; 1611 int diag; 1612 uint8_t mapping_found = 0; 1613 1614 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1615 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1616 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1617 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1618 tx_queue_stats_mappings[i].queue_id, 1619 tx_queue_stats_mappings[i].stats_counter_id); 1620 if (diag != 0) 1621 return diag; 1622 mapping_found = 1; 1623 } 1624 } 1625 if (mapping_found) 1626 port->tx_queue_stats_mapping_enabled = 1; 1627 return 0; 1628 } 1629 1630 static int 1631 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1632 { 1633 uint16_t i; 1634 int diag; 1635 uint8_t mapping_found = 0; 1636 1637 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1638 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1639 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1640 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1641 rx_queue_stats_mappings[i].queue_id, 1642 rx_queue_stats_mappings[i].stats_counter_id); 1643 if (diag != 0) 1644 return diag; 1645 mapping_found = 1; 1646 } 1647 } 1648 if (mapping_found) 1649 port->rx_queue_stats_mapping_enabled = 1; 1650 return 0; 1651 } 1652 1653 static void 1654 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1655 { 1656 int diag = 0; 1657 1658 diag = set_tx_queue_stats_mapping_registers(pi, port); 1659 if (diag != 0) { 1660 if (diag == -ENOTSUP) { 1661 port->tx_queue_stats_mapping_enabled = 0; 1662 printf("TX queue stats mapping not supported port id=%d\n", pi); 1663 } 1664 else 1665 rte_exit(EXIT_FAILURE, 1666 "set_tx_queue_stats_mapping_registers " 1667 "failed for port id=%d diag=%d\n", 1668 pi, diag); 1669 } 1670 1671 diag = set_rx_queue_stats_mapping_registers(pi, port); 1672 if (diag != 0) { 1673 if (diag == -ENOTSUP) { 1674 port->rx_queue_stats_mapping_enabled = 0; 1675 printf("RX queue stats mapping not supported port id=%d\n", pi); 1676 } 1677 else 1678 rte_exit(EXIT_FAILURE, 1679 "set_rx_queue_stats_mapping_registers " 1680 "failed for port id=%d diag=%d\n", 1681 pi, diag); 1682 } 1683 } 1684 1685 void 1686 init_port_config(void) 1687 { 1688 portid_t pid; 1689 struct rte_port *port; 1690 1691 for (pid = 0; pid < nb_ports; pid++) { 1692 port = &ports[pid]; 1693 port->dev_conf.rxmode = rx_mode; 1694 port->dev_conf.fdir_conf = fdir_conf; 1695 if (nb_rxq > 1) { 1696 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1697 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1698 } else { 1699 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1700 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1701 } 1702 1703 /* In SR-IOV mode, RSS mode is not available */ 1704 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1705 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1706 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1707 else 1708 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1709 } 1710 1711 port->rx_conf.rx_thresh = rx_thresh; 1712 port->rx_conf.rx_free_thresh = rx_free_thresh; 1713 port->rx_conf.rx_drop_en = rx_drop_en; 1714 port->tx_conf.tx_thresh = tx_thresh; 1715 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1716 port->tx_conf.tx_free_thresh = tx_free_thresh; 1717 port->tx_conf.txq_flags = txq_flags; 1718 1719 rte_eth_macaddr_get(pid, &port->eth_addr); 1720 1721 map_port_queue_stats_mapping_registers(pid, port); 1722 #ifdef RTE_NIC_BYPASS 1723 rte_eth_dev_bypass_init(pid); 1724 #endif 1725 } 1726 } 1727 1728 const uint16_t vlan_tags[] = { 1729 0, 1, 2, 3, 4, 5, 6, 7, 1730 8, 9, 10, 11, 12, 13, 14, 15, 1731 16, 17, 18, 19, 20, 21, 22, 23, 1732 24, 25, 26, 27, 28, 29, 30, 31 1733 }; 1734 1735 static int 1736 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1737 { 1738 uint8_t i; 1739 1740 /* 1741 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1742 * given above, and the number of traffic classes available for use. 1743 */ 1744 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1745 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1746 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1747 1748 /* VMDQ+DCB RX and TX configrations */ 1749 vmdq_rx_conf.enable_default_pool = 0; 1750 vmdq_rx_conf.default_pool = 0; 1751 vmdq_rx_conf.nb_queue_pools = 1752 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1753 vmdq_tx_conf.nb_queue_pools = 1754 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1755 1756 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1757 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1758 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1759 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1760 } 1761 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1762 vmdq_rx_conf.dcb_queue[i] = i; 1763 vmdq_tx_conf.dcb_queue[i] = i; 1764 } 1765 1766 /*set DCB mode of RX and TX of multiple queues*/ 1767 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1768 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1769 if (dcb_conf->pfc_en) 1770 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1771 else 1772 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1773 1774 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1775 sizeof(struct rte_eth_vmdq_dcb_conf))); 1776 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1777 sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1778 } 1779 else { 1780 struct rte_eth_dcb_rx_conf rx_conf; 1781 struct rte_eth_dcb_tx_conf tx_conf; 1782 1783 /* queue mapping configuration of DCB RX and TX */ 1784 if (dcb_conf->num_tcs == ETH_4_TCS) 1785 dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1786 else 1787 dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1788 1789 rx_conf.nb_tcs = dcb_conf->num_tcs; 1790 tx_conf.nb_tcs = dcb_conf->num_tcs; 1791 1792 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1793 rx_conf.dcb_queue[i] = i; 1794 tx_conf.dcb_queue[i] = i; 1795 } 1796 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 1797 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1798 if (dcb_conf->pfc_en) 1799 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1800 else 1801 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1802 1803 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1804 sizeof(struct rte_eth_dcb_rx_conf))); 1805 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1806 sizeof(struct rte_eth_dcb_tx_conf))); 1807 } 1808 1809 return 0; 1810 } 1811 1812 int 1813 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1814 { 1815 struct rte_eth_conf port_conf; 1816 struct rte_port *rte_port; 1817 int retval; 1818 uint16_t nb_vlan; 1819 uint16_t i; 1820 1821 /* rxq and txq configuration in dcb mode */ 1822 nb_rxq = 128; 1823 nb_txq = 128; 1824 rx_free_thresh = 64; 1825 1826 memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1827 /* Enter DCB configuration status */ 1828 dcb_config = 1; 1829 1830 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1831 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1832 retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1833 if (retval < 0) 1834 return retval; 1835 1836 rte_port = &ports[pid]; 1837 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1838 1839 rte_port->rx_conf.rx_thresh = rx_thresh; 1840 rte_port->rx_conf.rx_free_thresh = rx_free_thresh; 1841 rte_port->tx_conf.tx_thresh = tx_thresh; 1842 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1843 rte_port->tx_conf.tx_free_thresh = tx_free_thresh; 1844 /* VLAN filter */ 1845 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1846 for (i = 0; i < nb_vlan; i++){ 1847 rx_vft_set(pid, vlan_tags[i], 1); 1848 } 1849 1850 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1851 map_port_queue_stats_mapping_registers(pid, rte_port); 1852 1853 rte_port->dcb_flag = 1; 1854 1855 return 0; 1856 } 1857 1858 #ifdef RTE_EXEC_ENV_BAREMETAL 1859 #define main _main 1860 #endif 1861 1862 int 1863 main(int argc, char** argv) 1864 { 1865 int diag; 1866 uint8_t port_id; 1867 1868 diag = rte_eal_init(argc, argv); 1869 if (diag < 0) 1870 rte_panic("Cannot init EAL\n"); 1871 1872 nb_ports = (portid_t) rte_eth_dev_count(); 1873 if (nb_ports == 0) 1874 rte_exit(EXIT_FAILURE, "No probed ethernet device\n"); 1875 1876 set_def_fwd_config(); 1877 if (nb_lcores == 0) 1878 rte_panic("Empty set of forwarding logical cores - check the " 1879 "core mask supplied in the command parameters\n"); 1880 1881 argc -= diag; 1882 argv += diag; 1883 if (argc > 1) 1884 launch_args_parse(argc, argv); 1885 1886 if (nb_rxq > nb_txq) 1887 printf("Warning: nb_rxq=%d enables RSS configuration, " 1888 "but nb_txq=%d will prevent to fully test it.\n", 1889 nb_rxq, nb_txq); 1890 1891 init_config(); 1892 if (start_port(RTE_PORT_ALL) != 0) 1893 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 1894 1895 /* set all ports to promiscuous mode by default */ 1896 for (port_id = 0; port_id < nb_ports; port_id++) 1897 rte_eth_promiscuous_enable(port_id); 1898 1899 #ifdef RTE_LIBRTE_CMDLINE 1900 if (interactive == 1) { 1901 if (auto_start) { 1902 printf("Start automatic packet forwarding\n"); 1903 start_packet_forwarding(0); 1904 } 1905 prompt(); 1906 } else 1907 #endif 1908 { 1909 char c; 1910 int rc; 1911 1912 printf("No commandline core given, start packet forwarding\n"); 1913 start_packet_forwarding(0); 1914 printf("Press enter to exit\n"); 1915 rc = read(0, &c, 1); 1916 if (rc < 0) 1917 return 1; 1918 } 1919 1920 return 0; 1921 } 1922