1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_byteorder.h> 53 #include <rte_log.h> 54 #include <rte_debug.h> 55 #include <rte_cycles.h> 56 #include <rte_memory.h> 57 #include <rte_memcpy.h> 58 #include <rte_memzone.h> 59 #include <rte_launch.h> 60 #include <rte_tailq.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_ring.h> 67 #include <rte_mempool.h> 68 #include <rte_malloc.h> 69 #include <rte_mbuf.h> 70 #include <rte_interrupts.h> 71 #include <rte_pci.h> 72 #include <rte_ether.h> 73 #include <rte_ethdev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 79 #include "testpmd.h" 80 #include "mempool_osdep.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 178 179 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180 uint8_t dcb_config = 0; 181 182 /* Whether the dcb is in testing status */ 183 uint8_t dcb_test = 0; 184 185 /* DCB on and VT on mapping is default */ 186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 /* 203 * Configurable values of RX and TX ring threshold registers. 204 */ 205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ 206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ 207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */ 208 209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */ 210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ 211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ 212 213 struct rte_eth_thresh rx_thresh = { 214 .pthresh = RX_PTHRESH, 215 .hthresh = RX_HTHRESH, 216 .wthresh = RX_WTHRESH, 217 }; 218 219 struct rte_eth_thresh tx_thresh = { 220 .pthresh = TX_PTHRESH, 221 .hthresh = TX_HTHRESH, 222 .wthresh = TX_WTHRESH, 223 }; 224 225 /* 226 * Configurable value of RX free threshold. 227 */ 228 uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets, 229 This setting is needed for ixgbe to enable bulk alloc or vector 230 receive functionality. */ 231 232 /* 233 * Configurable value of RX drop enable. 234 */ 235 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */ 236 237 /* 238 * Configurable value of TX free threshold. 239 */ 240 uint16_t tx_free_thresh = 0; /* Use default values. */ 241 242 /* 243 * Configurable value of TX RS bit threshold. 244 */ 245 uint16_t tx_rs_thresh = 0; /* Use default values. */ 246 247 /* 248 * Configurable value of TX queue flags. 249 */ 250 uint32_t txq_flags = 0; /* No flags set. */ 251 252 /* 253 * Receive Side Scaling (RSS) configuration. 254 */ 255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 256 257 /* 258 * Port topology configuration 259 */ 260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 261 262 /* 263 * Avoids to flush all the RX streams before starts forwarding. 264 */ 265 uint8_t no_flush_rx = 0; /* flush by default */ 266 267 /* 268 * Avoids to check link status when starting/stopping a port. 269 */ 270 uint8_t no_link_check = 0; /* check by default */ 271 272 /* 273 * NIC bypass mode configuration options. 274 */ 275 #ifdef RTE_NIC_BYPASS 276 277 /* The NIC bypass watchdog timeout. */ 278 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 279 280 #endif 281 282 /* 283 * Ethernet device configuration. 284 */ 285 struct rte_eth_rxmode rx_mode = { 286 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 287 .split_hdr_size = 0, 288 .header_split = 0, /**< Header Split disabled. */ 289 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 290 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 291 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 292 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 293 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 294 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 295 }; 296 297 struct rte_fdir_conf fdir_conf = { 298 .mode = RTE_FDIR_MODE_NONE, 299 .pballoc = RTE_FDIR_PBALLOC_64K, 300 .status = RTE_FDIR_REPORT_STATUS, 301 .flexbytes_offset = 0x6, 302 .drop_queue = 127, 303 }; 304 305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 306 307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 309 310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 312 313 uint16_t nb_tx_queue_stats_mappings = 0; 314 uint16_t nb_rx_queue_stats_mappings = 0; 315 316 /* Forward function declarations */ 317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 318 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask); 319 320 /* 321 * Check if all the ports are started. 322 * If yes, return positive value. If not, return zero. 323 */ 324 static int all_ports_started(void); 325 326 /* 327 * Setup default configuration. 328 */ 329 static void 330 set_default_fwd_lcores_config(void) 331 { 332 unsigned int i; 333 unsigned int nb_lc; 334 335 nb_lc = 0; 336 for (i = 0; i < RTE_MAX_LCORE; i++) { 337 if (! rte_lcore_is_enabled(i)) 338 continue; 339 if (i == rte_get_master_lcore()) 340 continue; 341 fwd_lcores_cpuids[nb_lc++] = i; 342 } 343 nb_lcores = (lcoreid_t) nb_lc; 344 nb_cfg_lcores = nb_lcores; 345 nb_fwd_lcores = 1; 346 } 347 348 static void 349 set_def_peer_eth_addrs(void) 350 { 351 portid_t i; 352 353 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 354 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 355 peer_eth_addrs[i].addr_bytes[5] = i; 356 } 357 } 358 359 static void 360 set_default_fwd_ports_config(void) 361 { 362 portid_t pt_id; 363 364 for (pt_id = 0; pt_id < nb_ports; pt_id++) 365 fwd_ports_ids[pt_id] = pt_id; 366 367 nb_cfg_ports = nb_ports; 368 nb_fwd_ports = nb_ports; 369 } 370 371 void 372 set_def_fwd_config(void) 373 { 374 set_default_fwd_lcores_config(); 375 set_def_peer_eth_addrs(); 376 set_default_fwd_ports_config(); 377 } 378 379 /* 380 * Configuration initialisation done once at init time. 381 */ 382 struct mbuf_ctor_arg { 383 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 384 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 385 }; 386 387 struct mbuf_pool_ctor_arg { 388 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 389 }; 390 391 static void 392 testpmd_mbuf_ctor(struct rte_mempool *mp, 393 void *opaque_arg, 394 void *raw_mbuf, 395 __attribute__((unused)) unsigned i) 396 { 397 struct mbuf_ctor_arg *mb_ctor_arg; 398 struct rte_mbuf *mb; 399 400 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 401 mb = (struct rte_mbuf *) raw_mbuf; 402 403 mb->pool = mp; 404 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 405 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 406 mb_ctor_arg->seg_buf_offset); 407 mb->buf_len = mb_ctor_arg->seg_buf_size; 408 mb->ol_flags = 0; 409 mb->data_off = RTE_PKTMBUF_HEADROOM; 410 mb->nb_segs = 1; 411 mb->l2_l3_len = 0; 412 mb->vlan_tci = 0; 413 mb->hash.rss = 0; 414 } 415 416 static void 417 testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 418 void *opaque_arg) 419 { 420 struct mbuf_pool_ctor_arg *mbp_ctor_arg; 421 struct rte_pktmbuf_pool_private *mbp_priv; 422 423 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 424 printf("%s(%s) private_data_size %d < %d\n", 425 __func__, mp->name, (int) mp->private_data_size, 426 (int) sizeof(struct rte_pktmbuf_pool_private)); 427 return; 428 } 429 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 430 mbp_priv = rte_mempool_get_priv(mp); 431 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 432 } 433 434 static void 435 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 436 unsigned int socket_id) 437 { 438 char pool_name[RTE_MEMPOOL_NAMESIZE]; 439 struct rte_mempool *rte_mp; 440 struct mbuf_pool_ctor_arg mbp_ctor_arg; 441 struct mbuf_ctor_arg mb_ctor_arg; 442 uint32_t mb_size; 443 444 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 445 mbuf_seg_size); 446 mb_ctor_arg.seg_buf_offset = 447 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 448 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 449 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 450 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 451 452 #ifdef RTE_LIBRTE_PMD_XENVIRT 453 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 454 (unsigned) mb_mempool_cache, 455 sizeof(struct rte_pktmbuf_pool_private), 456 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 457 testpmd_mbuf_ctor, &mb_ctor_arg, 458 socket_id, 0); 459 460 461 462 #else 463 if (mp_anon != 0) 464 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 465 (unsigned) mb_mempool_cache, 466 sizeof(struct rte_pktmbuf_pool_private), 467 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 468 testpmd_mbuf_ctor, &mb_ctor_arg, 469 socket_id, 0); 470 else 471 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 472 (unsigned) mb_mempool_cache, 473 sizeof(struct rte_pktmbuf_pool_private), 474 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 475 testpmd_mbuf_ctor, &mb_ctor_arg, 476 socket_id, 0); 477 478 #endif 479 480 if (rte_mp == NULL) { 481 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 482 "failed\n", socket_id); 483 } else if (verbose_level > 0) { 484 rte_mempool_dump(stdout, rte_mp); 485 } 486 } 487 488 /* 489 * Check given socket id is valid or not with NUMA mode, 490 * if valid, return 0, else return -1 491 */ 492 static int 493 check_socket_id(const unsigned int socket_id) 494 { 495 static int warning_once = 0; 496 497 if (socket_id >= MAX_SOCKET) { 498 if (!warning_once && numa_support) 499 printf("Warning: NUMA should be configured manually by" 500 " using --port-numa-config and" 501 " --ring-numa-config parameters along with" 502 " --numa.\n"); 503 warning_once = 1; 504 return -1; 505 } 506 return 0; 507 } 508 509 static void 510 init_config(void) 511 { 512 portid_t pid; 513 struct rte_port *port; 514 struct rte_mempool *mbp; 515 unsigned int nb_mbuf_per_pool; 516 lcoreid_t lc_id; 517 uint8_t port_per_socket[MAX_SOCKET]; 518 519 memset(port_per_socket,0,MAX_SOCKET); 520 /* Configuration of logical cores. */ 521 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 522 sizeof(struct fwd_lcore *) * nb_lcores, 523 CACHE_LINE_SIZE); 524 if (fwd_lcores == NULL) { 525 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 526 "failed\n", nb_lcores); 527 } 528 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 529 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 530 sizeof(struct fwd_lcore), 531 CACHE_LINE_SIZE); 532 if (fwd_lcores[lc_id] == NULL) { 533 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 534 "failed\n"); 535 } 536 fwd_lcores[lc_id]->cpuid_idx = lc_id; 537 } 538 539 /* 540 * Create pools of mbuf. 541 * If NUMA support is disabled, create a single pool of mbuf in 542 * socket 0 memory by default. 543 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 544 * 545 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 546 * nb_txd can be configured at run time. 547 */ 548 if (param_total_num_mbufs) 549 nb_mbuf_per_pool = param_total_num_mbufs; 550 else { 551 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 552 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 553 554 if (!numa_support) 555 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); 556 } 557 558 if (!numa_support) { 559 if (socket_num == UMA_NO_CONFIG) 560 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 561 else 562 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 563 socket_num); 564 } 565 566 /* Configuration of Ethernet ports. */ 567 ports = rte_zmalloc("testpmd: ports", 568 sizeof(struct rte_port) * nb_ports, 569 CACHE_LINE_SIZE); 570 if (ports == NULL) { 571 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) " 572 "failed\n", nb_ports); 573 } 574 575 for (pid = 0; pid < nb_ports; pid++) { 576 port = &ports[pid]; 577 rte_eth_dev_info_get(pid, &port->dev_info); 578 579 if (numa_support) { 580 if (port_numa[pid] != NUMA_NO_CONFIG) 581 port_per_socket[port_numa[pid]]++; 582 else { 583 uint32_t socket_id = rte_eth_dev_socket_id(pid); 584 585 /* if socket_id is invalid, set to 0 */ 586 if (check_socket_id(socket_id) < 0) 587 socket_id = 0; 588 port_per_socket[socket_id]++; 589 } 590 } 591 592 /* set flag to initialize port/queue */ 593 port->need_reconfig = 1; 594 port->need_reconfig_queues = 1; 595 } 596 597 if (numa_support) { 598 uint8_t i; 599 unsigned int nb_mbuf; 600 601 if (param_total_num_mbufs) 602 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 603 604 for (i = 0; i < MAX_SOCKET; i++) { 605 nb_mbuf = (nb_mbuf_per_pool * 606 port_per_socket[i]); 607 if (nb_mbuf) 608 mbuf_pool_create(mbuf_data_size, 609 nb_mbuf,i); 610 } 611 } 612 init_port_config(); 613 614 /* 615 * Records which Mbuf pool to use by each logical core, if needed. 616 */ 617 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 618 mbp = mbuf_pool_find( 619 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 620 621 if (mbp == NULL) 622 mbp = mbuf_pool_find(0); 623 fwd_lcores[lc_id]->mbp = mbp; 624 } 625 626 /* Configuration of packet forwarding streams. */ 627 if (init_fwd_streams() < 0) 628 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 629 } 630 631 632 void 633 reconfig(portid_t new_port_id) 634 { 635 struct rte_port *port; 636 637 /* Reconfiguration of Ethernet ports. */ 638 ports = rte_realloc(ports, 639 sizeof(struct rte_port) * nb_ports, 640 CACHE_LINE_SIZE); 641 if (ports == NULL) { 642 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n", 643 nb_ports); 644 } 645 646 port = &ports[new_port_id]; 647 rte_eth_dev_info_get(new_port_id, &port->dev_info); 648 649 /* set flag to initialize port/queue */ 650 port->need_reconfig = 1; 651 port->need_reconfig_queues = 1; 652 653 init_port_config(); 654 } 655 656 657 int 658 init_fwd_streams(void) 659 { 660 portid_t pid; 661 struct rte_port *port; 662 streamid_t sm_id, nb_fwd_streams_new; 663 664 /* set socket id according to numa or not */ 665 for (pid = 0; pid < nb_ports; pid++) { 666 port = &ports[pid]; 667 if (nb_rxq > port->dev_info.max_rx_queues) { 668 printf("Fail: nb_rxq(%d) is greater than " 669 "max_rx_queues(%d)\n", nb_rxq, 670 port->dev_info.max_rx_queues); 671 return -1; 672 } 673 if (nb_txq > port->dev_info.max_tx_queues) { 674 printf("Fail: nb_txq(%d) is greater than " 675 "max_tx_queues(%d)\n", nb_txq, 676 port->dev_info.max_tx_queues); 677 return -1; 678 } 679 if (numa_support) { 680 if (port_numa[pid] != NUMA_NO_CONFIG) 681 port->socket_id = port_numa[pid]; 682 else { 683 port->socket_id = rte_eth_dev_socket_id(pid); 684 685 /* if socket_id is invalid, set to 0 */ 686 if (check_socket_id(port->socket_id) < 0) 687 port->socket_id = 0; 688 } 689 } 690 else { 691 if (socket_num == UMA_NO_CONFIG) 692 port->socket_id = 0; 693 else 694 port->socket_id = socket_num; 695 } 696 } 697 698 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 699 if (nb_fwd_streams_new == nb_fwd_streams) 700 return 0; 701 /* clear the old */ 702 if (fwd_streams != NULL) { 703 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 704 if (fwd_streams[sm_id] == NULL) 705 continue; 706 rte_free(fwd_streams[sm_id]); 707 fwd_streams[sm_id] = NULL; 708 } 709 rte_free(fwd_streams); 710 fwd_streams = NULL; 711 } 712 713 /* init new */ 714 nb_fwd_streams = nb_fwd_streams_new; 715 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 716 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE); 717 if (fwd_streams == NULL) 718 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 719 "failed\n", nb_fwd_streams); 720 721 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 722 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 723 sizeof(struct fwd_stream), CACHE_LINE_SIZE); 724 if (fwd_streams[sm_id] == NULL) 725 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 726 " failed\n"); 727 } 728 729 return 0; 730 } 731 732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 733 static void 734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 735 { 736 unsigned int total_burst; 737 unsigned int nb_burst; 738 unsigned int burst_stats[3]; 739 uint16_t pktnb_stats[3]; 740 uint16_t nb_pkt; 741 int burst_percent[3]; 742 743 /* 744 * First compute the total number of packet bursts and the 745 * two highest numbers of bursts of the same number of packets. 746 */ 747 total_burst = 0; 748 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 749 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 750 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 751 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 752 if (nb_burst == 0) 753 continue; 754 total_burst += nb_burst; 755 if (nb_burst > burst_stats[0]) { 756 burst_stats[1] = burst_stats[0]; 757 pktnb_stats[1] = pktnb_stats[0]; 758 burst_stats[0] = nb_burst; 759 pktnb_stats[0] = nb_pkt; 760 } 761 } 762 if (total_burst == 0) 763 return; 764 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 765 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 766 burst_percent[0], (int) pktnb_stats[0]); 767 if (burst_stats[0] == total_burst) { 768 printf("]\n"); 769 return; 770 } 771 if (burst_stats[0] + burst_stats[1] == total_burst) { 772 printf(" + %d%% of %d pkts]\n", 773 100 - burst_percent[0], pktnb_stats[1]); 774 return; 775 } 776 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 777 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 778 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 779 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 780 return; 781 } 782 printf(" + %d%% of %d pkts + %d%% of others]\n", 783 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 784 } 785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 786 787 static void 788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 789 { 790 struct rte_port *port; 791 uint8_t i; 792 793 static const char *fwd_stats_border = "----------------------"; 794 795 port = &ports[port_id]; 796 printf("\n %s Forward statistics for port %-2d %s\n", 797 fwd_stats_border, port_id, fwd_stats_border); 798 799 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 800 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 801 "%-"PRIu64"\n", 802 stats->ipackets, stats->imissed, 803 (uint64_t) (stats->ipackets + stats->imissed)); 804 805 if (cur_fwd_eng == &csum_fwd_engine) 806 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 807 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 808 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 809 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64 810 "RX-error: %-"PRIu64"\n", 811 stats->ibadcrc, stats->ibadlen, stats->ierrors); 812 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 813 } 814 815 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 816 "%-"PRIu64"\n", 817 stats->opackets, port->tx_dropped, 818 (uint64_t) (stats->opackets + port->tx_dropped)); 819 } 820 else { 821 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 822 "%14"PRIu64"\n", 823 stats->ipackets, stats->imissed, 824 (uint64_t) (stats->ipackets + stats->imissed)); 825 826 if (cur_fwd_eng == &csum_fwd_engine) 827 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 828 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 829 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 830 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64 831 " RX-error:%"PRIu64"\n", 832 stats->ibadcrc, stats->ibadlen, stats->ierrors); 833 printf(" RX-nombufs: %14"PRIu64"\n", 834 stats->rx_nombuf); 835 } 836 837 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 838 "%14"PRIu64"\n", 839 stats->opackets, port->tx_dropped, 840 (uint64_t) (stats->opackets + port->tx_dropped)); 841 } 842 843 /* Display statistics of XON/XOFF pause frames, if any. */ 844 if ((stats->tx_pause_xon | stats->rx_pause_xon | 845 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 846 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 847 stats->rx_pause_xoff, stats->rx_pause_xon); 848 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 849 stats->tx_pause_xoff, stats->tx_pause_xon); 850 } 851 852 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 853 if (port->rx_stream) 854 pkt_burst_stats_display("RX", 855 &port->rx_stream->rx_burst_stats); 856 if (port->tx_stream) 857 pkt_burst_stats_display("TX", 858 &port->tx_stream->tx_burst_stats); 859 #endif 860 /* stats fdir */ 861 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 862 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 863 stats->fdirmiss, 864 stats->fdirmatch); 865 866 if (port->rx_queue_stats_mapping_enabled) { 867 printf("\n"); 868 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 869 printf(" Stats reg %2d RX-packets:%14"PRIu64 870 " RX-errors:%14"PRIu64 871 " RX-bytes:%14"PRIu64"\n", 872 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 873 } 874 printf("\n"); 875 } 876 if (port->tx_queue_stats_mapping_enabled) { 877 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 878 printf(" Stats reg %2d TX-packets:%14"PRIu64 879 " TX-bytes:%14"PRIu64"\n", 880 i, stats->q_opackets[i], stats->q_obytes[i]); 881 } 882 } 883 884 printf(" %s--------------------------------%s\n", 885 fwd_stats_border, fwd_stats_border); 886 } 887 888 static void 889 fwd_stream_stats_display(streamid_t stream_id) 890 { 891 struct fwd_stream *fs; 892 static const char *fwd_top_stats_border = "-------"; 893 894 fs = fwd_streams[stream_id]; 895 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 896 (fs->fwd_dropped == 0)) 897 return; 898 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 899 "TX Port=%2d/Queue=%2d %s\n", 900 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 901 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 902 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 903 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 904 905 /* if checksum mode */ 906 if (cur_fwd_eng == &csum_fwd_engine) { 907 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 908 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 909 } 910 911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 912 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 913 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 914 #endif 915 } 916 917 static void 918 flush_fwd_rx_queues(void) 919 { 920 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 921 portid_t rxp; 922 portid_t port_id; 923 queueid_t rxq; 924 uint16_t nb_rx; 925 uint16_t i; 926 uint8_t j; 927 928 for (j = 0; j < 2; j++) { 929 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 930 for (rxq = 0; rxq < nb_rxq; rxq++) { 931 port_id = fwd_ports_ids[rxp]; 932 do { 933 nb_rx = rte_eth_rx_burst(port_id, rxq, 934 pkts_burst, MAX_PKT_BURST); 935 for (i = 0; i < nb_rx; i++) 936 rte_pktmbuf_free(pkts_burst[i]); 937 } while (nb_rx > 0); 938 } 939 } 940 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 941 } 942 } 943 944 static void 945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 946 { 947 struct fwd_stream **fsm; 948 streamid_t nb_fs; 949 streamid_t sm_id; 950 951 fsm = &fwd_streams[fc->stream_idx]; 952 nb_fs = fc->stream_nb; 953 do { 954 for (sm_id = 0; sm_id < nb_fs; sm_id++) 955 (*pkt_fwd)(fsm[sm_id]); 956 } while (! fc->stopped); 957 } 958 959 static int 960 start_pkt_forward_on_core(void *fwd_arg) 961 { 962 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 963 cur_fwd_config.fwd_eng->packet_fwd); 964 return 0; 965 } 966 967 /* 968 * Run the TXONLY packet forwarding engine to send a single burst of packets. 969 * Used to start communication flows in network loopback test configurations. 970 */ 971 static int 972 run_one_txonly_burst_on_core(void *fwd_arg) 973 { 974 struct fwd_lcore *fwd_lc; 975 struct fwd_lcore tmp_lcore; 976 977 fwd_lc = (struct fwd_lcore *) fwd_arg; 978 tmp_lcore = *fwd_lc; 979 tmp_lcore.stopped = 1; 980 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 981 return 0; 982 } 983 984 /* 985 * Launch packet forwarding: 986 * - Setup per-port forwarding context. 987 * - launch logical cores with their forwarding configuration. 988 */ 989 static void 990 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 991 { 992 port_fwd_begin_t port_fwd_begin; 993 unsigned int i; 994 unsigned int lc_id; 995 int diag; 996 997 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 998 if (port_fwd_begin != NULL) { 999 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1000 (*port_fwd_begin)(fwd_ports_ids[i]); 1001 } 1002 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1003 lc_id = fwd_lcores_cpuids[i]; 1004 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1005 fwd_lcores[i]->stopped = 0; 1006 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1007 fwd_lcores[i], lc_id); 1008 if (diag != 0) 1009 printf("launch lcore %u failed - diag=%d\n", 1010 lc_id, diag); 1011 } 1012 } 1013 } 1014 1015 /* 1016 * Launch packet forwarding configuration. 1017 */ 1018 void 1019 start_packet_forwarding(int with_tx_first) 1020 { 1021 port_fwd_begin_t port_fwd_begin; 1022 port_fwd_end_t port_fwd_end; 1023 struct rte_port *port; 1024 unsigned int i; 1025 portid_t pt_id; 1026 streamid_t sm_id; 1027 1028 if (all_ports_started() == 0) { 1029 printf("Not all ports were started\n"); 1030 return; 1031 } 1032 if (test_done == 0) { 1033 printf("Packet forwarding already started\n"); 1034 return; 1035 } 1036 if(dcb_test) { 1037 for (i = 0; i < nb_fwd_ports; i++) { 1038 pt_id = fwd_ports_ids[i]; 1039 port = &ports[pt_id]; 1040 if (!port->dcb_flag) { 1041 printf("In DCB mode, all forwarding ports must " 1042 "be configured in this mode.\n"); 1043 return; 1044 } 1045 } 1046 if (nb_fwd_lcores == 1) { 1047 printf("In DCB mode,the nb forwarding cores " 1048 "should be larger than 1.\n"); 1049 return; 1050 } 1051 } 1052 test_done = 0; 1053 1054 if(!no_flush_rx) 1055 flush_fwd_rx_queues(); 1056 1057 fwd_config_setup(); 1058 rxtx_config_display(); 1059 1060 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1061 pt_id = fwd_ports_ids[i]; 1062 port = &ports[pt_id]; 1063 rte_eth_stats_get(pt_id, &port->stats); 1064 port->tx_dropped = 0; 1065 1066 map_port_queue_stats_mapping_registers(pt_id, port); 1067 } 1068 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1069 fwd_streams[sm_id]->rx_packets = 0; 1070 fwd_streams[sm_id]->tx_packets = 0; 1071 fwd_streams[sm_id]->fwd_dropped = 0; 1072 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1073 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1074 1075 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1076 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1077 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1078 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1079 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1080 #endif 1081 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1082 fwd_streams[sm_id]->core_cycles = 0; 1083 #endif 1084 } 1085 if (with_tx_first) { 1086 port_fwd_begin = tx_only_engine.port_fwd_begin; 1087 if (port_fwd_begin != NULL) { 1088 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1089 (*port_fwd_begin)(fwd_ports_ids[i]); 1090 } 1091 launch_packet_forwarding(run_one_txonly_burst_on_core); 1092 rte_eal_mp_wait_lcore(); 1093 port_fwd_end = tx_only_engine.port_fwd_end; 1094 if (port_fwd_end != NULL) { 1095 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1096 (*port_fwd_end)(fwd_ports_ids[i]); 1097 } 1098 } 1099 launch_packet_forwarding(start_pkt_forward_on_core); 1100 } 1101 1102 void 1103 stop_packet_forwarding(void) 1104 { 1105 struct rte_eth_stats stats; 1106 struct rte_port *port; 1107 port_fwd_end_t port_fwd_end; 1108 int i; 1109 portid_t pt_id; 1110 streamid_t sm_id; 1111 lcoreid_t lc_id; 1112 uint64_t total_recv; 1113 uint64_t total_xmit; 1114 uint64_t total_rx_dropped; 1115 uint64_t total_tx_dropped; 1116 uint64_t total_rx_nombuf; 1117 uint64_t tx_dropped; 1118 uint64_t rx_bad_ip_csum; 1119 uint64_t rx_bad_l4_csum; 1120 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1121 uint64_t fwd_cycles; 1122 #endif 1123 static const char *acc_stats_border = "+++++++++++++++"; 1124 1125 if (all_ports_started() == 0) { 1126 printf("Not all ports were started\n"); 1127 return; 1128 } 1129 if (test_done) { 1130 printf("Packet forwarding not started\n"); 1131 return; 1132 } 1133 printf("Telling cores to stop..."); 1134 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1135 fwd_lcores[lc_id]->stopped = 1; 1136 printf("\nWaiting for lcores to finish...\n"); 1137 rte_eal_mp_wait_lcore(); 1138 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1139 if (port_fwd_end != NULL) { 1140 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1141 pt_id = fwd_ports_ids[i]; 1142 (*port_fwd_end)(pt_id); 1143 } 1144 } 1145 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1146 fwd_cycles = 0; 1147 #endif 1148 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1149 if (cur_fwd_config.nb_fwd_streams > 1150 cur_fwd_config.nb_fwd_ports) { 1151 fwd_stream_stats_display(sm_id); 1152 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1153 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1154 } else { 1155 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1156 fwd_streams[sm_id]; 1157 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1158 fwd_streams[sm_id]; 1159 } 1160 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1161 tx_dropped = (uint64_t) (tx_dropped + 1162 fwd_streams[sm_id]->fwd_dropped); 1163 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1164 1165 rx_bad_ip_csum = 1166 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1167 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1168 fwd_streams[sm_id]->rx_bad_ip_csum); 1169 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1170 rx_bad_ip_csum; 1171 1172 rx_bad_l4_csum = 1173 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1174 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1175 fwd_streams[sm_id]->rx_bad_l4_csum); 1176 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1177 rx_bad_l4_csum; 1178 1179 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1180 fwd_cycles = (uint64_t) (fwd_cycles + 1181 fwd_streams[sm_id]->core_cycles); 1182 #endif 1183 } 1184 total_recv = 0; 1185 total_xmit = 0; 1186 total_rx_dropped = 0; 1187 total_tx_dropped = 0; 1188 total_rx_nombuf = 0; 1189 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1190 pt_id = fwd_ports_ids[i]; 1191 1192 port = &ports[pt_id]; 1193 rte_eth_stats_get(pt_id, &stats); 1194 stats.ipackets -= port->stats.ipackets; 1195 port->stats.ipackets = 0; 1196 stats.opackets -= port->stats.opackets; 1197 port->stats.opackets = 0; 1198 stats.ibytes -= port->stats.ibytes; 1199 port->stats.ibytes = 0; 1200 stats.obytes -= port->stats.obytes; 1201 port->stats.obytes = 0; 1202 stats.imissed -= port->stats.imissed; 1203 port->stats.imissed = 0; 1204 stats.oerrors -= port->stats.oerrors; 1205 port->stats.oerrors = 0; 1206 stats.rx_nombuf -= port->stats.rx_nombuf; 1207 port->stats.rx_nombuf = 0; 1208 stats.fdirmatch -= port->stats.fdirmatch; 1209 port->stats.rx_nombuf = 0; 1210 stats.fdirmiss -= port->stats.fdirmiss; 1211 port->stats.rx_nombuf = 0; 1212 1213 total_recv += stats.ipackets; 1214 total_xmit += stats.opackets; 1215 total_rx_dropped += stats.imissed; 1216 total_tx_dropped += port->tx_dropped; 1217 total_rx_nombuf += stats.rx_nombuf; 1218 1219 fwd_port_stats_display(pt_id, &stats); 1220 } 1221 printf("\n %s Accumulated forward statistics for all ports" 1222 "%s\n", 1223 acc_stats_border, acc_stats_border); 1224 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1225 "%-"PRIu64"\n" 1226 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1227 "%-"PRIu64"\n", 1228 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1229 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1230 if (total_rx_nombuf > 0) 1231 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1232 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1233 "%s\n", 1234 acc_stats_border, acc_stats_border); 1235 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1236 if (total_recv > 0) 1237 printf("\n CPU cycles/packet=%u (total cycles=" 1238 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1239 (unsigned int)(fwd_cycles / total_recv), 1240 fwd_cycles, total_recv); 1241 #endif 1242 printf("\nDone.\n"); 1243 test_done = 1; 1244 } 1245 1246 void 1247 dev_set_link_up(portid_t pid) 1248 { 1249 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1250 printf("\nSet link up fail.\n"); 1251 } 1252 1253 void 1254 dev_set_link_down(portid_t pid) 1255 { 1256 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1257 printf("\nSet link down fail.\n"); 1258 } 1259 1260 static int 1261 all_ports_started(void) 1262 { 1263 portid_t pi; 1264 struct rte_port *port; 1265 1266 for (pi = 0; pi < nb_ports; pi++) { 1267 port = &ports[pi]; 1268 /* Check if there is a port which is not started */ 1269 if (port->port_status != RTE_PORT_STARTED) 1270 return 0; 1271 } 1272 1273 /* No port is not started */ 1274 return 1; 1275 } 1276 1277 int 1278 start_port(portid_t pid) 1279 { 1280 int diag, need_check_link_status = 0; 1281 portid_t pi; 1282 queueid_t qi; 1283 struct rte_port *port; 1284 struct ether_addr mac_addr; 1285 1286 if (test_done == 0) { 1287 printf("Please stop forwarding first\n"); 1288 return -1; 1289 } 1290 1291 if (init_fwd_streams() < 0) { 1292 printf("Fail from init_fwd_streams()\n"); 1293 return -1; 1294 } 1295 1296 if(dcb_config) 1297 dcb_test = 1; 1298 for (pi = 0; pi < nb_ports; pi++) { 1299 if (pid < nb_ports && pid != pi) 1300 continue; 1301 1302 port = &ports[pi]; 1303 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1304 RTE_PORT_HANDLING) == 0) { 1305 printf("Port %d is now not stopped\n", pi); 1306 continue; 1307 } 1308 1309 if (port->need_reconfig > 0) { 1310 port->need_reconfig = 0; 1311 1312 printf("Configuring Port %d (socket %u)\n", pi, 1313 port->socket_id); 1314 /* configure port */ 1315 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1316 &(port->dev_conf)); 1317 if (diag != 0) { 1318 if (rte_atomic16_cmpset(&(port->port_status), 1319 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1320 printf("Port %d can not be set back " 1321 "to stopped\n", pi); 1322 printf("Fail to configure port %d\n", pi); 1323 /* try to reconfigure port next time */ 1324 port->need_reconfig = 1; 1325 return -1; 1326 } 1327 } 1328 if (port->need_reconfig_queues > 0) { 1329 port->need_reconfig_queues = 0; 1330 /* setup tx queues */ 1331 for (qi = 0; qi < nb_txq; qi++) { 1332 if ((numa_support) && 1333 (txring_numa[pi] != NUMA_NO_CONFIG)) 1334 diag = rte_eth_tx_queue_setup(pi, qi, 1335 nb_txd,txring_numa[pi], 1336 &(port->tx_conf)); 1337 else 1338 diag = rte_eth_tx_queue_setup(pi, qi, 1339 nb_txd,port->socket_id, 1340 &(port->tx_conf)); 1341 1342 if (diag == 0) 1343 continue; 1344 1345 /* Fail to setup tx queue, return */ 1346 if (rte_atomic16_cmpset(&(port->port_status), 1347 RTE_PORT_HANDLING, 1348 RTE_PORT_STOPPED) == 0) 1349 printf("Port %d can not be set back " 1350 "to stopped\n", pi); 1351 printf("Fail to configure port %d tx queues\n", pi); 1352 /* try to reconfigure queues next time */ 1353 port->need_reconfig_queues = 1; 1354 return -1; 1355 } 1356 /* setup rx queues */ 1357 for (qi = 0; qi < nb_rxq; qi++) { 1358 if ((numa_support) && 1359 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1360 struct rte_mempool * mp = 1361 mbuf_pool_find(rxring_numa[pi]); 1362 if (mp == NULL) { 1363 printf("Failed to setup RX queue:" 1364 "No mempool allocation" 1365 "on the socket %d\n", 1366 rxring_numa[pi]); 1367 return -1; 1368 } 1369 1370 diag = rte_eth_rx_queue_setup(pi, qi, 1371 nb_rxd,rxring_numa[pi], 1372 &(port->rx_conf),mp); 1373 } 1374 else 1375 diag = rte_eth_rx_queue_setup(pi, qi, 1376 nb_rxd,port->socket_id, 1377 &(port->rx_conf), 1378 mbuf_pool_find(port->socket_id)); 1379 1380 if (diag == 0) 1381 continue; 1382 1383 1384 /* Fail to setup rx queue, return */ 1385 if (rte_atomic16_cmpset(&(port->port_status), 1386 RTE_PORT_HANDLING, 1387 RTE_PORT_STOPPED) == 0) 1388 printf("Port %d can not be set back " 1389 "to stopped\n", pi); 1390 printf("Fail to configure port %d rx queues\n", pi); 1391 /* try to reconfigure queues next time */ 1392 port->need_reconfig_queues = 1; 1393 return -1; 1394 } 1395 } 1396 /* start port */ 1397 if (rte_eth_dev_start(pi) < 0) { 1398 printf("Fail to start port %d\n", pi); 1399 1400 /* Fail to setup rx queue, return */ 1401 if (rte_atomic16_cmpset(&(port->port_status), 1402 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1403 printf("Port %d can not be set back to " 1404 "stopped\n", pi); 1405 continue; 1406 } 1407 1408 if (rte_atomic16_cmpset(&(port->port_status), 1409 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1410 printf("Port %d can not be set into started\n", pi); 1411 1412 rte_eth_macaddr_get(pi, &mac_addr); 1413 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1414 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1415 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1416 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1417 1418 /* at least one port started, need checking link status */ 1419 need_check_link_status = 1; 1420 } 1421 1422 if (need_check_link_status && !no_link_check) 1423 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1424 else 1425 printf("Please stop the ports first\n"); 1426 1427 printf("Done\n"); 1428 return 0; 1429 } 1430 1431 void 1432 stop_port(portid_t pid) 1433 { 1434 portid_t pi; 1435 struct rte_port *port; 1436 int need_check_link_status = 0; 1437 1438 if (test_done == 0) { 1439 printf("Please stop forwarding first\n"); 1440 return; 1441 } 1442 if (dcb_test) { 1443 dcb_test = 0; 1444 dcb_config = 0; 1445 } 1446 printf("Stopping ports...\n"); 1447 1448 for (pi = 0; pi < nb_ports; pi++) { 1449 if (pid < nb_ports && pid != pi) 1450 continue; 1451 1452 port = &ports[pi]; 1453 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1454 RTE_PORT_HANDLING) == 0) 1455 continue; 1456 1457 rte_eth_dev_stop(pi); 1458 1459 if (rte_atomic16_cmpset(&(port->port_status), 1460 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1461 printf("Port %d can not be set into stopped\n", pi); 1462 need_check_link_status = 1; 1463 } 1464 if (need_check_link_status && !no_link_check) 1465 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1466 1467 printf("Done\n"); 1468 } 1469 1470 void 1471 close_port(portid_t pid) 1472 { 1473 portid_t pi; 1474 struct rte_port *port; 1475 1476 if (test_done == 0) { 1477 printf("Please stop forwarding first\n"); 1478 return; 1479 } 1480 1481 printf("Closing ports...\n"); 1482 1483 for (pi = 0; pi < nb_ports; pi++) { 1484 if (pid < nb_ports && pid != pi) 1485 continue; 1486 1487 port = &ports[pi]; 1488 if (rte_atomic16_cmpset(&(port->port_status), 1489 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1490 printf("Port %d is now not stopped\n", pi); 1491 continue; 1492 } 1493 1494 rte_eth_dev_close(pi); 1495 1496 if (rte_atomic16_cmpset(&(port->port_status), 1497 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1498 printf("Port %d can not be set into stopped\n", pi); 1499 } 1500 1501 printf("Done\n"); 1502 } 1503 1504 int 1505 all_ports_stopped(void) 1506 { 1507 portid_t pi; 1508 struct rte_port *port; 1509 1510 for (pi = 0; pi < nb_ports; pi++) { 1511 port = &ports[pi]; 1512 if (port->port_status != RTE_PORT_STOPPED) 1513 return 0; 1514 } 1515 1516 return 1; 1517 } 1518 1519 int 1520 port_is_started(portid_t port_id) 1521 { 1522 if (port_id_is_invalid(port_id)) 1523 return -1; 1524 1525 if (ports[port_id].port_status != RTE_PORT_STARTED) 1526 return 0; 1527 1528 return 1; 1529 } 1530 1531 void 1532 pmd_test_exit(void) 1533 { 1534 portid_t pt_id; 1535 1536 for (pt_id = 0; pt_id < nb_ports; pt_id++) { 1537 printf("Stopping port %d...", pt_id); 1538 fflush(stdout); 1539 rte_eth_dev_close(pt_id); 1540 printf("done\n"); 1541 } 1542 printf("bye...\n"); 1543 } 1544 1545 typedef void (*cmd_func_t)(void); 1546 struct pmd_test_command { 1547 const char *cmd_name; 1548 cmd_func_t cmd_func; 1549 }; 1550 1551 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1552 1553 /* Check the link status of all ports in up to 9s, and print them finally */ 1554 static void 1555 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 1556 { 1557 #define CHECK_INTERVAL 100 /* 100ms */ 1558 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1559 uint8_t portid, count, all_ports_up, print_flag = 0; 1560 struct rte_eth_link link; 1561 1562 printf("Checking link statuses...\n"); 1563 fflush(stdout); 1564 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1565 all_ports_up = 1; 1566 for (portid = 0; portid < port_num; portid++) { 1567 if ((port_mask & (1 << portid)) == 0) 1568 continue; 1569 memset(&link, 0, sizeof(link)); 1570 rte_eth_link_get_nowait(portid, &link); 1571 /* print link status if flag set */ 1572 if (print_flag == 1) { 1573 if (link.link_status) 1574 printf("Port %d Link Up - speed %u " 1575 "Mbps - %s\n", (uint8_t)portid, 1576 (unsigned)link.link_speed, 1577 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1578 ("full-duplex") : ("half-duplex\n")); 1579 else 1580 printf("Port %d Link Down\n", 1581 (uint8_t)portid); 1582 continue; 1583 } 1584 /* clear all_ports_up flag if any link down */ 1585 if (link.link_status == 0) { 1586 all_ports_up = 0; 1587 break; 1588 } 1589 } 1590 /* after finally printing all link status, get out */ 1591 if (print_flag == 1) 1592 break; 1593 1594 if (all_ports_up == 0) { 1595 fflush(stdout); 1596 rte_delay_ms(CHECK_INTERVAL); 1597 } 1598 1599 /* set the print_flag if all ports up or timeout */ 1600 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1601 print_flag = 1; 1602 } 1603 } 1604 } 1605 1606 static int 1607 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1608 { 1609 uint16_t i; 1610 int diag; 1611 uint8_t mapping_found = 0; 1612 1613 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1614 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1615 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1616 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1617 tx_queue_stats_mappings[i].queue_id, 1618 tx_queue_stats_mappings[i].stats_counter_id); 1619 if (diag != 0) 1620 return diag; 1621 mapping_found = 1; 1622 } 1623 } 1624 if (mapping_found) 1625 port->tx_queue_stats_mapping_enabled = 1; 1626 return 0; 1627 } 1628 1629 static int 1630 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1631 { 1632 uint16_t i; 1633 int diag; 1634 uint8_t mapping_found = 0; 1635 1636 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1637 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1638 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1639 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1640 rx_queue_stats_mappings[i].queue_id, 1641 rx_queue_stats_mappings[i].stats_counter_id); 1642 if (diag != 0) 1643 return diag; 1644 mapping_found = 1; 1645 } 1646 } 1647 if (mapping_found) 1648 port->rx_queue_stats_mapping_enabled = 1; 1649 return 0; 1650 } 1651 1652 static void 1653 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1654 { 1655 int diag = 0; 1656 1657 diag = set_tx_queue_stats_mapping_registers(pi, port); 1658 if (diag != 0) { 1659 if (diag == -ENOTSUP) { 1660 port->tx_queue_stats_mapping_enabled = 0; 1661 printf("TX queue stats mapping not supported port id=%d\n", pi); 1662 } 1663 else 1664 rte_exit(EXIT_FAILURE, 1665 "set_tx_queue_stats_mapping_registers " 1666 "failed for port id=%d diag=%d\n", 1667 pi, diag); 1668 } 1669 1670 diag = set_rx_queue_stats_mapping_registers(pi, port); 1671 if (diag != 0) { 1672 if (diag == -ENOTSUP) { 1673 port->rx_queue_stats_mapping_enabled = 0; 1674 printf("RX queue stats mapping not supported port id=%d\n", pi); 1675 } 1676 else 1677 rte_exit(EXIT_FAILURE, 1678 "set_rx_queue_stats_mapping_registers " 1679 "failed for port id=%d diag=%d\n", 1680 pi, diag); 1681 } 1682 } 1683 1684 void 1685 init_port_config(void) 1686 { 1687 portid_t pid; 1688 struct rte_port *port; 1689 1690 for (pid = 0; pid < nb_ports; pid++) { 1691 port = &ports[pid]; 1692 port->dev_conf.rxmode = rx_mode; 1693 port->dev_conf.fdir_conf = fdir_conf; 1694 if (nb_rxq > 1) { 1695 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1696 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1697 } else { 1698 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1699 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1700 } 1701 1702 /* In SR-IOV mode, RSS mode is not available */ 1703 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1704 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1705 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1706 else 1707 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1708 } 1709 1710 port->rx_conf.rx_thresh = rx_thresh; 1711 port->rx_conf.rx_free_thresh = rx_free_thresh; 1712 port->rx_conf.rx_drop_en = rx_drop_en; 1713 port->tx_conf.tx_thresh = tx_thresh; 1714 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1715 port->tx_conf.tx_free_thresh = tx_free_thresh; 1716 port->tx_conf.txq_flags = txq_flags; 1717 1718 rte_eth_macaddr_get(pid, &port->eth_addr); 1719 1720 map_port_queue_stats_mapping_registers(pid, port); 1721 #ifdef RTE_NIC_BYPASS 1722 rte_eth_dev_bypass_init(pid); 1723 #endif 1724 } 1725 } 1726 1727 const uint16_t vlan_tags[] = { 1728 0, 1, 2, 3, 4, 5, 6, 7, 1729 8, 9, 10, 11, 12, 13, 14, 15, 1730 16, 17, 18, 19, 20, 21, 22, 23, 1731 24, 25, 26, 27, 28, 29, 30, 31 1732 }; 1733 1734 static int 1735 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1736 { 1737 uint8_t i; 1738 1739 /* 1740 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1741 * given above, and the number of traffic classes available for use. 1742 */ 1743 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1744 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1745 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1746 1747 /* VMDQ+DCB RX and TX configrations */ 1748 vmdq_rx_conf.enable_default_pool = 0; 1749 vmdq_rx_conf.default_pool = 0; 1750 vmdq_rx_conf.nb_queue_pools = 1751 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1752 vmdq_tx_conf.nb_queue_pools = 1753 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1754 1755 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1756 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1757 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1758 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1759 } 1760 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1761 vmdq_rx_conf.dcb_queue[i] = i; 1762 vmdq_tx_conf.dcb_queue[i] = i; 1763 } 1764 1765 /*set DCB mode of RX and TX of multiple queues*/ 1766 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1767 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1768 if (dcb_conf->pfc_en) 1769 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1770 else 1771 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1772 1773 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1774 sizeof(struct rte_eth_vmdq_dcb_conf))); 1775 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1776 sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1777 } 1778 else { 1779 struct rte_eth_dcb_rx_conf rx_conf; 1780 struct rte_eth_dcb_tx_conf tx_conf; 1781 1782 /* queue mapping configuration of DCB RX and TX */ 1783 if (dcb_conf->num_tcs == ETH_4_TCS) 1784 dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1785 else 1786 dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1787 1788 rx_conf.nb_tcs = dcb_conf->num_tcs; 1789 tx_conf.nb_tcs = dcb_conf->num_tcs; 1790 1791 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1792 rx_conf.dcb_queue[i] = i; 1793 tx_conf.dcb_queue[i] = i; 1794 } 1795 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 1796 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1797 if (dcb_conf->pfc_en) 1798 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1799 else 1800 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1801 1802 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1803 sizeof(struct rte_eth_dcb_rx_conf))); 1804 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1805 sizeof(struct rte_eth_dcb_tx_conf))); 1806 } 1807 1808 return 0; 1809 } 1810 1811 int 1812 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1813 { 1814 struct rte_eth_conf port_conf; 1815 struct rte_port *rte_port; 1816 int retval; 1817 uint16_t nb_vlan; 1818 uint16_t i; 1819 1820 /* rxq and txq configuration in dcb mode */ 1821 nb_rxq = 128; 1822 nb_txq = 128; 1823 rx_free_thresh = 64; 1824 1825 memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1826 /* Enter DCB configuration status */ 1827 dcb_config = 1; 1828 1829 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1830 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1831 retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1832 if (retval < 0) 1833 return retval; 1834 1835 rte_port = &ports[pid]; 1836 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1837 1838 rte_port->rx_conf.rx_thresh = rx_thresh; 1839 rte_port->rx_conf.rx_free_thresh = rx_free_thresh; 1840 rte_port->tx_conf.tx_thresh = tx_thresh; 1841 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1842 rte_port->tx_conf.tx_free_thresh = tx_free_thresh; 1843 /* VLAN filter */ 1844 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1845 for (i = 0; i < nb_vlan; i++){ 1846 rx_vft_set(pid, vlan_tags[i], 1); 1847 } 1848 1849 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1850 map_port_queue_stats_mapping_registers(pid, rte_port); 1851 1852 rte_port->dcb_flag = 1; 1853 1854 return 0; 1855 } 1856 1857 #ifdef RTE_EXEC_ENV_BAREMETAL 1858 #define main _main 1859 #endif 1860 1861 int 1862 main(int argc, char** argv) 1863 { 1864 int diag; 1865 uint8_t port_id; 1866 1867 diag = rte_eal_init(argc, argv); 1868 if (diag < 0) 1869 rte_panic("Cannot init EAL\n"); 1870 1871 nb_ports = (portid_t) rte_eth_dev_count(); 1872 if (nb_ports == 0) 1873 rte_exit(EXIT_FAILURE, "No probed ethernet devices - " 1874 "check that " 1875 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that " 1876 "CONFIG_RTE_LIBRTE_EM_PMD=y and that " 1877 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your " 1878 "configuration file\n"); 1879 1880 set_def_fwd_config(); 1881 if (nb_lcores == 0) 1882 rte_panic("Empty set of forwarding logical cores - check the " 1883 "core mask supplied in the command parameters\n"); 1884 1885 argc -= diag; 1886 argv += diag; 1887 if (argc > 1) 1888 launch_args_parse(argc, argv); 1889 1890 if (nb_rxq > nb_txq) 1891 printf("Warning: nb_rxq=%d enables RSS configuration, " 1892 "but nb_txq=%d will prevent to fully test it.\n", 1893 nb_rxq, nb_txq); 1894 1895 init_config(); 1896 if (start_port(RTE_PORT_ALL) != 0) 1897 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 1898 1899 /* set all ports to promiscuous mode by default */ 1900 for (port_id = 0; port_id < nb_ports; port_id++) 1901 rte_eth_promiscuous_enable(port_id); 1902 1903 #ifdef RTE_LIBRTE_CMDLINE 1904 if (interactive == 1) { 1905 if (auto_start) { 1906 printf("Start automatic packet forwarding\n"); 1907 start_packet_forwarding(0); 1908 } 1909 prompt(); 1910 } else 1911 #endif 1912 { 1913 char c; 1914 int rc; 1915 1916 printf("No commandline core given, start packet forwarding\n"); 1917 start_packet_forwarding(0); 1918 printf("Press enter to exit\n"); 1919 rc = read(0, &c, 1); 1920 if (rc < 0) 1921 return 1; 1922 } 1923 1924 return 0; 1925 } 1926