1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_byteorder.h> 53 #include <rte_log.h> 54 #include <rte_debug.h> 55 #include <rte_cycles.h> 56 #include <rte_memory.h> 57 #include <rte_memcpy.h> 58 #include <rte_memzone.h> 59 #include <rte_launch.h> 60 #include <rte_tailq.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_ring.h> 67 #include <rte_mempool.h> 68 #include <rte_malloc.h> 69 #include <rte_mbuf.h> 70 #include <rte_interrupts.h> 71 #include <rte_pci.h> 72 #include <rte_ether.h> 73 #include <rte_ethdev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 79 #include "testpmd.h" 80 #include "mempool_osdep.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 178 179 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180 uint8_t dcb_config = 0; 181 182 /* Whether the dcb is in testing status */ 183 uint8_t dcb_test = 0; 184 185 /* DCB on and VT on mapping is default */ 186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 /* 203 * Configurable values of RX and TX ring threshold registers. 204 */ 205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ 206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ 207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */ 208 209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */ 210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ 211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ 212 213 struct rte_eth_thresh rx_thresh = { 214 .pthresh = RX_PTHRESH, 215 .hthresh = RX_HTHRESH, 216 .wthresh = RX_WTHRESH, 217 }; 218 219 struct rte_eth_thresh tx_thresh = { 220 .pthresh = TX_PTHRESH, 221 .hthresh = TX_HTHRESH, 222 .wthresh = TX_WTHRESH, 223 }; 224 225 /* 226 * Configurable value of RX free threshold. 227 */ 228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */ 229 230 /* 231 * Configurable value of RX drop enable. 232 */ 233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */ 234 235 /* 236 * Configurable value of TX free threshold. 237 */ 238 uint16_t tx_free_thresh = 0; /* Use default values. */ 239 240 /* 241 * Configurable value of TX RS bit threshold. 242 */ 243 uint16_t tx_rs_thresh = 0; /* Use default values. */ 244 245 /* 246 * Configurable value of TX queue flags. 247 */ 248 uint32_t txq_flags = 0; /* No flags set. */ 249 250 /* 251 * Receive Side Scaling (RSS) configuration. 252 */ 253 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 254 255 /* 256 * Port topology configuration 257 */ 258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 259 260 /* 261 * Avoids to flush all the RX streams before starts forwarding. 262 */ 263 uint8_t no_flush_rx = 0; /* flush by default */ 264 265 /* 266 * Avoids to check link status when starting/stopping a port. 267 */ 268 uint8_t no_link_check = 0; /* check by default */ 269 270 /* 271 * NIC bypass mode configuration options. 272 */ 273 #ifdef RTE_NIC_BYPASS 274 275 /* The NIC bypass watchdog timeout. */ 276 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 277 278 #endif 279 280 /* 281 * Ethernet device configuration. 282 */ 283 struct rte_eth_rxmode rx_mode = { 284 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 285 .split_hdr_size = 0, 286 .header_split = 0, /**< Header Split disabled. */ 287 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 288 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 289 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 290 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 291 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 292 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 293 }; 294 295 struct rte_fdir_conf fdir_conf = { 296 .mode = RTE_FDIR_MODE_NONE, 297 .pballoc = RTE_FDIR_PBALLOC_64K, 298 .status = RTE_FDIR_REPORT_STATUS, 299 .flexbytes_offset = 0x6, 300 .drop_queue = 127, 301 }; 302 303 volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 304 305 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 306 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 307 308 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 309 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 310 311 uint16_t nb_tx_queue_stats_mappings = 0; 312 uint16_t nb_rx_queue_stats_mappings = 0; 313 314 /* Forward function declarations */ 315 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 316 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask); 317 318 /* 319 * Check if all the ports are started. 320 * If yes, return positive value. If not, return zero. 321 */ 322 static int all_ports_started(void); 323 324 /* 325 * Setup default configuration. 326 */ 327 static void 328 set_default_fwd_lcores_config(void) 329 { 330 unsigned int i; 331 unsigned int nb_lc; 332 333 nb_lc = 0; 334 for (i = 0; i < RTE_MAX_LCORE; i++) { 335 if (! rte_lcore_is_enabled(i)) 336 continue; 337 if (i == rte_get_master_lcore()) 338 continue; 339 fwd_lcores_cpuids[nb_lc++] = i; 340 } 341 nb_lcores = (lcoreid_t) nb_lc; 342 nb_cfg_lcores = nb_lcores; 343 nb_fwd_lcores = 1; 344 } 345 346 static void 347 set_def_peer_eth_addrs(void) 348 { 349 portid_t i; 350 351 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 352 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 353 peer_eth_addrs[i].addr_bytes[5] = i; 354 } 355 } 356 357 static void 358 set_default_fwd_ports_config(void) 359 { 360 portid_t pt_id; 361 362 for (pt_id = 0; pt_id < nb_ports; pt_id++) 363 fwd_ports_ids[pt_id] = pt_id; 364 365 nb_cfg_ports = nb_ports; 366 nb_fwd_ports = nb_ports; 367 } 368 369 void 370 set_def_fwd_config(void) 371 { 372 set_default_fwd_lcores_config(); 373 set_def_peer_eth_addrs(); 374 set_default_fwd_ports_config(); 375 } 376 377 /* 378 * Configuration initialisation done once at init time. 379 */ 380 struct mbuf_ctor_arg { 381 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 382 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 383 }; 384 385 struct mbuf_pool_ctor_arg { 386 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 387 }; 388 389 static void 390 testpmd_mbuf_ctor(struct rte_mempool *mp, 391 void *opaque_arg, 392 void *raw_mbuf, 393 __attribute__((unused)) unsigned i) 394 { 395 struct mbuf_ctor_arg *mb_ctor_arg; 396 struct rte_mbuf *mb; 397 398 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 399 mb = (struct rte_mbuf *) raw_mbuf; 400 401 mb->pool = mp; 402 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 403 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 404 mb_ctor_arg->seg_buf_offset); 405 mb->buf_len = mb_ctor_arg->seg_buf_size; 406 mb->ol_flags = 0; 407 mb->data_off = RTE_PKTMBUF_HEADROOM; 408 mb->nb_segs = 1; 409 mb->l2_l3_len = 0; 410 mb->vlan_tci = 0; 411 mb->hash.rss = 0; 412 } 413 414 static void 415 testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 416 void *opaque_arg) 417 { 418 struct mbuf_pool_ctor_arg *mbp_ctor_arg; 419 struct rte_pktmbuf_pool_private *mbp_priv; 420 421 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 422 printf("%s(%s) private_data_size %d < %d\n", 423 __func__, mp->name, (int) mp->private_data_size, 424 (int) sizeof(struct rte_pktmbuf_pool_private)); 425 return; 426 } 427 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 428 mbp_priv = rte_mempool_get_priv(mp); 429 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 430 } 431 432 static void 433 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 434 unsigned int socket_id) 435 { 436 char pool_name[RTE_MEMPOOL_NAMESIZE]; 437 struct rte_mempool *rte_mp; 438 struct mbuf_pool_ctor_arg mbp_ctor_arg; 439 struct mbuf_ctor_arg mb_ctor_arg; 440 uint32_t mb_size; 441 442 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 443 mbuf_seg_size); 444 mb_ctor_arg.seg_buf_offset = 445 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 446 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 447 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 448 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 449 450 #ifdef RTE_LIBRTE_PMD_XENVIRT 451 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 452 (unsigned) mb_mempool_cache, 453 sizeof(struct rte_pktmbuf_pool_private), 454 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 455 testpmd_mbuf_ctor, &mb_ctor_arg, 456 socket_id, 0); 457 458 459 460 #else 461 if (mp_anon != 0) 462 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 463 (unsigned) mb_mempool_cache, 464 sizeof(struct rte_pktmbuf_pool_private), 465 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 466 testpmd_mbuf_ctor, &mb_ctor_arg, 467 socket_id, 0); 468 else 469 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 470 (unsigned) mb_mempool_cache, 471 sizeof(struct rte_pktmbuf_pool_private), 472 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 473 testpmd_mbuf_ctor, &mb_ctor_arg, 474 socket_id, 0); 475 476 #endif 477 478 if (rte_mp == NULL) { 479 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 480 "failed\n", socket_id); 481 } else if (verbose_level > 0) { 482 rte_mempool_dump(stdout, rte_mp); 483 } 484 } 485 486 /* 487 * Check given socket id is valid or not with NUMA mode, 488 * if valid, return 0, else return -1 489 */ 490 static int 491 check_socket_id(const unsigned int socket_id) 492 { 493 static int warning_once = 0; 494 495 if (socket_id >= MAX_SOCKET) { 496 if (!warning_once && numa_support) 497 printf("Warning: NUMA should be configured manually by" 498 " using --port-numa-config and" 499 " --ring-numa-config parameters along with" 500 " --numa.\n"); 501 warning_once = 1; 502 return -1; 503 } 504 return 0; 505 } 506 507 static void 508 init_config(void) 509 { 510 portid_t pid; 511 struct rte_port *port; 512 struct rte_mempool *mbp; 513 unsigned int nb_mbuf_per_pool; 514 lcoreid_t lc_id; 515 uint8_t port_per_socket[MAX_SOCKET]; 516 517 memset(port_per_socket,0,MAX_SOCKET); 518 /* Configuration of logical cores. */ 519 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 520 sizeof(struct fwd_lcore *) * nb_lcores, 521 CACHE_LINE_SIZE); 522 if (fwd_lcores == NULL) { 523 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 524 "failed\n", nb_lcores); 525 } 526 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 527 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 528 sizeof(struct fwd_lcore), 529 CACHE_LINE_SIZE); 530 if (fwd_lcores[lc_id] == NULL) { 531 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 532 "failed\n"); 533 } 534 fwd_lcores[lc_id]->cpuid_idx = lc_id; 535 } 536 537 /* 538 * Create pools of mbuf. 539 * If NUMA support is disabled, create a single pool of mbuf in 540 * socket 0 memory by default. 541 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 542 * 543 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 544 * nb_txd can be configured at run time. 545 */ 546 if (param_total_num_mbufs) 547 nb_mbuf_per_pool = param_total_num_mbufs; 548 else { 549 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 550 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 551 552 if (!numa_support) 553 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); 554 } 555 556 if (!numa_support) { 557 if (socket_num == UMA_NO_CONFIG) 558 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 559 else 560 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 561 socket_num); 562 } 563 564 /* Configuration of Ethernet ports. */ 565 ports = rte_zmalloc("testpmd: ports", 566 sizeof(struct rte_port) * nb_ports, 567 CACHE_LINE_SIZE); 568 if (ports == NULL) { 569 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) " 570 "failed\n", nb_ports); 571 } 572 573 for (pid = 0; pid < nb_ports; pid++) { 574 port = &ports[pid]; 575 rte_eth_dev_info_get(pid, &port->dev_info); 576 577 if (numa_support) { 578 if (port_numa[pid] != NUMA_NO_CONFIG) 579 port_per_socket[port_numa[pid]]++; 580 else { 581 uint32_t socket_id = rte_eth_dev_socket_id(pid); 582 583 /* if socket_id is invalid, set to 0 */ 584 if (check_socket_id(socket_id) < 0) 585 socket_id = 0; 586 port_per_socket[socket_id]++; 587 } 588 } 589 590 /* set flag to initialize port/queue */ 591 port->need_reconfig = 1; 592 port->need_reconfig_queues = 1; 593 } 594 595 if (numa_support) { 596 uint8_t i; 597 unsigned int nb_mbuf; 598 599 if (param_total_num_mbufs) 600 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 601 602 for (i = 0; i < MAX_SOCKET; i++) { 603 nb_mbuf = (nb_mbuf_per_pool * 604 port_per_socket[i]); 605 if (nb_mbuf) 606 mbuf_pool_create(mbuf_data_size, 607 nb_mbuf,i); 608 } 609 } 610 init_port_config(); 611 612 /* 613 * Records which Mbuf pool to use by each logical core, if needed. 614 */ 615 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 616 mbp = mbuf_pool_find( 617 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 618 619 if (mbp == NULL) 620 mbp = mbuf_pool_find(0); 621 fwd_lcores[lc_id]->mbp = mbp; 622 } 623 624 /* Configuration of packet forwarding streams. */ 625 if (init_fwd_streams() < 0) 626 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 627 } 628 629 630 void 631 reconfig(portid_t new_port_id) 632 { 633 struct rte_port *port; 634 635 /* Reconfiguration of Ethernet ports. */ 636 ports = rte_realloc(ports, 637 sizeof(struct rte_port) * nb_ports, 638 CACHE_LINE_SIZE); 639 if (ports == NULL) { 640 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n", 641 nb_ports); 642 } 643 644 port = &ports[new_port_id]; 645 rte_eth_dev_info_get(new_port_id, &port->dev_info); 646 647 /* set flag to initialize port/queue */ 648 port->need_reconfig = 1; 649 port->need_reconfig_queues = 1; 650 651 init_port_config(); 652 } 653 654 655 int 656 init_fwd_streams(void) 657 { 658 portid_t pid; 659 struct rte_port *port; 660 streamid_t sm_id, nb_fwd_streams_new; 661 662 /* set socket id according to numa or not */ 663 for (pid = 0; pid < nb_ports; pid++) { 664 port = &ports[pid]; 665 if (nb_rxq > port->dev_info.max_rx_queues) { 666 printf("Fail: nb_rxq(%d) is greater than " 667 "max_rx_queues(%d)\n", nb_rxq, 668 port->dev_info.max_rx_queues); 669 return -1; 670 } 671 if (nb_txq > port->dev_info.max_tx_queues) { 672 printf("Fail: nb_txq(%d) is greater than " 673 "max_tx_queues(%d)\n", nb_txq, 674 port->dev_info.max_tx_queues); 675 return -1; 676 } 677 if (numa_support) { 678 if (port_numa[pid] != NUMA_NO_CONFIG) 679 port->socket_id = port_numa[pid]; 680 else { 681 port->socket_id = rte_eth_dev_socket_id(pid); 682 683 /* if socket_id is invalid, set to 0 */ 684 if (check_socket_id(port->socket_id) < 0) 685 port->socket_id = 0; 686 } 687 } 688 else { 689 if (socket_num == UMA_NO_CONFIG) 690 port->socket_id = 0; 691 else 692 port->socket_id = socket_num; 693 } 694 } 695 696 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 697 if (nb_fwd_streams_new == nb_fwd_streams) 698 return 0; 699 /* clear the old */ 700 if (fwd_streams != NULL) { 701 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 702 if (fwd_streams[sm_id] == NULL) 703 continue; 704 rte_free(fwd_streams[sm_id]); 705 fwd_streams[sm_id] = NULL; 706 } 707 rte_free(fwd_streams); 708 fwd_streams = NULL; 709 } 710 711 /* init new */ 712 nb_fwd_streams = nb_fwd_streams_new; 713 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 714 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE); 715 if (fwd_streams == NULL) 716 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 717 "failed\n", nb_fwd_streams); 718 719 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 720 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 721 sizeof(struct fwd_stream), CACHE_LINE_SIZE); 722 if (fwd_streams[sm_id] == NULL) 723 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 724 " failed\n"); 725 } 726 727 return 0; 728 } 729 730 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 731 static void 732 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 733 { 734 unsigned int total_burst; 735 unsigned int nb_burst; 736 unsigned int burst_stats[3]; 737 uint16_t pktnb_stats[3]; 738 uint16_t nb_pkt; 739 int burst_percent[3]; 740 741 /* 742 * First compute the total number of packet bursts and the 743 * two highest numbers of bursts of the same number of packets. 744 */ 745 total_burst = 0; 746 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 747 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 748 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 749 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 750 if (nb_burst == 0) 751 continue; 752 total_burst += nb_burst; 753 if (nb_burst > burst_stats[0]) { 754 burst_stats[1] = burst_stats[0]; 755 pktnb_stats[1] = pktnb_stats[0]; 756 burst_stats[0] = nb_burst; 757 pktnb_stats[0] = nb_pkt; 758 } 759 } 760 if (total_burst == 0) 761 return; 762 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 763 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 764 burst_percent[0], (int) pktnb_stats[0]); 765 if (burst_stats[0] == total_burst) { 766 printf("]\n"); 767 return; 768 } 769 if (burst_stats[0] + burst_stats[1] == total_burst) { 770 printf(" + %d%% of %d pkts]\n", 771 100 - burst_percent[0], pktnb_stats[1]); 772 return; 773 } 774 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 775 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 776 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 777 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 778 return; 779 } 780 printf(" + %d%% of %d pkts + %d%% of others]\n", 781 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 782 } 783 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 784 785 static void 786 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 787 { 788 struct rte_port *port; 789 uint8_t i; 790 791 static const char *fwd_stats_border = "----------------------"; 792 793 port = &ports[port_id]; 794 printf("\n %s Forward statistics for port %-2d %s\n", 795 fwd_stats_border, port_id, fwd_stats_border); 796 797 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 798 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 799 "%-"PRIu64"\n", 800 stats->ipackets, stats->imissed, 801 (uint64_t) (stats->ipackets + stats->imissed)); 802 803 if (cur_fwd_eng == &csum_fwd_engine) 804 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 805 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 806 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 807 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64 808 "RX-error: %-"PRIu64"\n", 809 stats->ibadcrc, stats->ibadlen, stats->ierrors); 810 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 811 } 812 813 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 814 "%-"PRIu64"\n", 815 stats->opackets, port->tx_dropped, 816 (uint64_t) (stats->opackets + port->tx_dropped)); 817 } 818 else { 819 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 820 "%14"PRIu64"\n", 821 stats->ipackets, stats->imissed, 822 (uint64_t) (stats->ipackets + stats->imissed)); 823 824 if (cur_fwd_eng == &csum_fwd_engine) 825 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 826 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 827 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 828 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64 829 " RX-error:%"PRIu64"\n", 830 stats->ibadcrc, stats->ibadlen, stats->ierrors); 831 printf(" RX-nombufs: %14"PRIu64"\n", 832 stats->rx_nombuf); 833 } 834 835 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 836 "%14"PRIu64"\n", 837 stats->opackets, port->tx_dropped, 838 (uint64_t) (stats->opackets + port->tx_dropped)); 839 } 840 841 /* Display statistics of XON/XOFF pause frames, if any. */ 842 if ((stats->tx_pause_xon | stats->rx_pause_xon | 843 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 844 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 845 stats->rx_pause_xoff, stats->rx_pause_xon); 846 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 847 stats->tx_pause_xoff, stats->tx_pause_xon); 848 } 849 850 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 851 if (port->rx_stream) 852 pkt_burst_stats_display("RX", 853 &port->rx_stream->rx_burst_stats); 854 if (port->tx_stream) 855 pkt_burst_stats_display("TX", 856 &port->tx_stream->tx_burst_stats); 857 #endif 858 /* stats fdir */ 859 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 860 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 861 stats->fdirmiss, 862 stats->fdirmatch); 863 864 if (port->rx_queue_stats_mapping_enabled) { 865 printf("\n"); 866 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 867 printf(" Stats reg %2d RX-packets:%14"PRIu64 868 " RX-errors:%14"PRIu64 869 " RX-bytes:%14"PRIu64"\n", 870 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 871 } 872 printf("\n"); 873 } 874 if (port->tx_queue_stats_mapping_enabled) { 875 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 876 printf(" Stats reg %2d TX-packets:%14"PRIu64 877 " TX-bytes:%14"PRIu64"\n", 878 i, stats->q_opackets[i], stats->q_obytes[i]); 879 } 880 } 881 882 printf(" %s--------------------------------%s\n", 883 fwd_stats_border, fwd_stats_border); 884 } 885 886 static void 887 fwd_stream_stats_display(streamid_t stream_id) 888 { 889 struct fwd_stream *fs; 890 static const char *fwd_top_stats_border = "-------"; 891 892 fs = fwd_streams[stream_id]; 893 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 894 (fs->fwd_dropped == 0)) 895 return; 896 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 897 "TX Port=%2d/Queue=%2d %s\n", 898 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 899 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 900 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 901 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 902 903 /* if checksum mode */ 904 if (cur_fwd_eng == &csum_fwd_engine) { 905 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 906 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 907 } 908 909 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 910 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 911 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 912 #endif 913 } 914 915 static void 916 flush_fwd_rx_queues(void) 917 { 918 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 919 portid_t rxp; 920 portid_t port_id; 921 queueid_t rxq; 922 uint16_t nb_rx; 923 uint16_t i; 924 uint8_t j; 925 926 for (j = 0; j < 2; j++) { 927 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 928 for (rxq = 0; rxq < nb_rxq; rxq++) { 929 port_id = fwd_ports_ids[rxp]; 930 do { 931 nb_rx = rte_eth_rx_burst(port_id, rxq, 932 pkts_burst, MAX_PKT_BURST); 933 for (i = 0; i < nb_rx; i++) 934 rte_pktmbuf_free(pkts_burst[i]); 935 } while (nb_rx > 0); 936 } 937 } 938 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 939 } 940 } 941 942 static void 943 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 944 { 945 struct fwd_stream **fsm; 946 streamid_t nb_fs; 947 streamid_t sm_id; 948 949 fsm = &fwd_streams[fc->stream_idx]; 950 nb_fs = fc->stream_nb; 951 do { 952 for (sm_id = 0; sm_id < nb_fs; sm_id++) 953 (*pkt_fwd)(fsm[sm_id]); 954 } while (! fc->stopped); 955 } 956 957 static int 958 start_pkt_forward_on_core(void *fwd_arg) 959 { 960 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 961 cur_fwd_config.fwd_eng->packet_fwd); 962 return 0; 963 } 964 965 /* 966 * Run the TXONLY packet forwarding engine to send a single burst of packets. 967 * Used to start communication flows in network loopback test configurations. 968 */ 969 static int 970 run_one_txonly_burst_on_core(void *fwd_arg) 971 { 972 struct fwd_lcore *fwd_lc; 973 struct fwd_lcore tmp_lcore; 974 975 fwd_lc = (struct fwd_lcore *) fwd_arg; 976 tmp_lcore = *fwd_lc; 977 tmp_lcore.stopped = 1; 978 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 979 return 0; 980 } 981 982 /* 983 * Launch packet forwarding: 984 * - Setup per-port forwarding context. 985 * - launch logical cores with their forwarding configuration. 986 */ 987 static void 988 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 989 { 990 port_fwd_begin_t port_fwd_begin; 991 unsigned int i; 992 unsigned int lc_id; 993 int diag; 994 995 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 996 if (port_fwd_begin != NULL) { 997 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 998 (*port_fwd_begin)(fwd_ports_ids[i]); 999 } 1000 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1001 lc_id = fwd_lcores_cpuids[i]; 1002 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1003 fwd_lcores[i]->stopped = 0; 1004 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1005 fwd_lcores[i], lc_id); 1006 if (diag != 0) 1007 printf("launch lcore %u failed - diag=%d\n", 1008 lc_id, diag); 1009 } 1010 } 1011 } 1012 1013 /* 1014 * Launch packet forwarding configuration. 1015 */ 1016 void 1017 start_packet_forwarding(int with_tx_first) 1018 { 1019 port_fwd_begin_t port_fwd_begin; 1020 port_fwd_end_t port_fwd_end; 1021 struct rte_port *port; 1022 unsigned int i; 1023 portid_t pt_id; 1024 streamid_t sm_id; 1025 1026 if (all_ports_started() == 0) { 1027 printf("Not all ports were started\n"); 1028 return; 1029 } 1030 if (test_done == 0) { 1031 printf("Packet forwarding already started\n"); 1032 return; 1033 } 1034 if(dcb_test) { 1035 for (i = 0; i < nb_fwd_ports; i++) { 1036 pt_id = fwd_ports_ids[i]; 1037 port = &ports[pt_id]; 1038 if (!port->dcb_flag) { 1039 printf("In DCB mode, all forwarding ports must " 1040 "be configured in this mode.\n"); 1041 return; 1042 } 1043 } 1044 if (nb_fwd_lcores == 1) { 1045 printf("In DCB mode,the nb forwarding cores " 1046 "should be larger than 1.\n"); 1047 return; 1048 } 1049 } 1050 test_done = 0; 1051 1052 if(!no_flush_rx) 1053 flush_fwd_rx_queues(); 1054 1055 fwd_config_setup(); 1056 rxtx_config_display(); 1057 1058 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1059 pt_id = fwd_ports_ids[i]; 1060 port = &ports[pt_id]; 1061 rte_eth_stats_get(pt_id, &port->stats); 1062 port->tx_dropped = 0; 1063 1064 map_port_queue_stats_mapping_registers(pt_id, port); 1065 } 1066 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1067 fwd_streams[sm_id]->rx_packets = 0; 1068 fwd_streams[sm_id]->tx_packets = 0; 1069 fwd_streams[sm_id]->fwd_dropped = 0; 1070 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1071 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1072 1073 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1074 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1075 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1076 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1077 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1078 #endif 1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1080 fwd_streams[sm_id]->core_cycles = 0; 1081 #endif 1082 } 1083 if (with_tx_first) { 1084 port_fwd_begin = tx_only_engine.port_fwd_begin; 1085 if (port_fwd_begin != NULL) { 1086 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1087 (*port_fwd_begin)(fwd_ports_ids[i]); 1088 } 1089 launch_packet_forwarding(run_one_txonly_burst_on_core); 1090 rte_eal_mp_wait_lcore(); 1091 port_fwd_end = tx_only_engine.port_fwd_end; 1092 if (port_fwd_end != NULL) { 1093 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1094 (*port_fwd_end)(fwd_ports_ids[i]); 1095 } 1096 } 1097 launch_packet_forwarding(start_pkt_forward_on_core); 1098 } 1099 1100 void 1101 stop_packet_forwarding(void) 1102 { 1103 struct rte_eth_stats stats; 1104 struct rte_port *port; 1105 port_fwd_end_t port_fwd_end; 1106 int i; 1107 portid_t pt_id; 1108 streamid_t sm_id; 1109 lcoreid_t lc_id; 1110 uint64_t total_recv; 1111 uint64_t total_xmit; 1112 uint64_t total_rx_dropped; 1113 uint64_t total_tx_dropped; 1114 uint64_t total_rx_nombuf; 1115 uint64_t tx_dropped; 1116 uint64_t rx_bad_ip_csum; 1117 uint64_t rx_bad_l4_csum; 1118 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1119 uint64_t fwd_cycles; 1120 #endif 1121 static const char *acc_stats_border = "+++++++++++++++"; 1122 1123 if (all_ports_started() == 0) { 1124 printf("Not all ports were started\n"); 1125 return; 1126 } 1127 if (test_done) { 1128 printf("Packet forwarding not started\n"); 1129 return; 1130 } 1131 printf("Telling cores to stop..."); 1132 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1133 fwd_lcores[lc_id]->stopped = 1; 1134 printf("\nWaiting for lcores to finish...\n"); 1135 rte_eal_mp_wait_lcore(); 1136 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1137 if (port_fwd_end != NULL) { 1138 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1139 pt_id = fwd_ports_ids[i]; 1140 (*port_fwd_end)(pt_id); 1141 } 1142 } 1143 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1144 fwd_cycles = 0; 1145 #endif 1146 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1147 if (cur_fwd_config.nb_fwd_streams > 1148 cur_fwd_config.nb_fwd_ports) { 1149 fwd_stream_stats_display(sm_id); 1150 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1151 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1152 } else { 1153 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1154 fwd_streams[sm_id]; 1155 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1156 fwd_streams[sm_id]; 1157 } 1158 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1159 tx_dropped = (uint64_t) (tx_dropped + 1160 fwd_streams[sm_id]->fwd_dropped); 1161 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1162 1163 rx_bad_ip_csum = 1164 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1165 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1166 fwd_streams[sm_id]->rx_bad_ip_csum); 1167 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1168 rx_bad_ip_csum; 1169 1170 rx_bad_l4_csum = 1171 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1172 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1173 fwd_streams[sm_id]->rx_bad_l4_csum); 1174 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1175 rx_bad_l4_csum; 1176 1177 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1178 fwd_cycles = (uint64_t) (fwd_cycles + 1179 fwd_streams[sm_id]->core_cycles); 1180 #endif 1181 } 1182 total_recv = 0; 1183 total_xmit = 0; 1184 total_rx_dropped = 0; 1185 total_tx_dropped = 0; 1186 total_rx_nombuf = 0; 1187 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1188 pt_id = fwd_ports_ids[i]; 1189 1190 port = &ports[pt_id]; 1191 rte_eth_stats_get(pt_id, &stats); 1192 stats.ipackets -= port->stats.ipackets; 1193 port->stats.ipackets = 0; 1194 stats.opackets -= port->stats.opackets; 1195 port->stats.opackets = 0; 1196 stats.ibytes -= port->stats.ibytes; 1197 port->stats.ibytes = 0; 1198 stats.obytes -= port->stats.obytes; 1199 port->stats.obytes = 0; 1200 stats.imissed -= port->stats.imissed; 1201 port->stats.imissed = 0; 1202 stats.oerrors -= port->stats.oerrors; 1203 port->stats.oerrors = 0; 1204 stats.rx_nombuf -= port->stats.rx_nombuf; 1205 port->stats.rx_nombuf = 0; 1206 stats.fdirmatch -= port->stats.fdirmatch; 1207 port->stats.rx_nombuf = 0; 1208 stats.fdirmiss -= port->stats.fdirmiss; 1209 port->stats.rx_nombuf = 0; 1210 1211 total_recv += stats.ipackets; 1212 total_xmit += stats.opackets; 1213 total_rx_dropped += stats.imissed; 1214 total_tx_dropped += port->tx_dropped; 1215 total_rx_nombuf += stats.rx_nombuf; 1216 1217 fwd_port_stats_display(pt_id, &stats); 1218 } 1219 printf("\n %s Accumulated forward statistics for all ports" 1220 "%s\n", 1221 acc_stats_border, acc_stats_border); 1222 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1223 "%-"PRIu64"\n" 1224 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1225 "%-"PRIu64"\n", 1226 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1227 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1228 if (total_rx_nombuf > 0) 1229 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1230 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1231 "%s\n", 1232 acc_stats_border, acc_stats_border); 1233 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1234 if (total_recv > 0) 1235 printf("\n CPU cycles/packet=%u (total cycles=" 1236 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1237 (unsigned int)(fwd_cycles / total_recv), 1238 fwd_cycles, total_recv); 1239 #endif 1240 printf("\nDone.\n"); 1241 test_done = 1; 1242 } 1243 1244 void 1245 dev_set_link_up(portid_t pid) 1246 { 1247 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1248 printf("\nSet link up fail.\n"); 1249 } 1250 1251 void 1252 dev_set_link_down(portid_t pid) 1253 { 1254 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1255 printf("\nSet link down fail.\n"); 1256 } 1257 1258 static int 1259 all_ports_started(void) 1260 { 1261 portid_t pi; 1262 struct rte_port *port; 1263 1264 for (pi = 0; pi < nb_ports; pi++) { 1265 port = &ports[pi]; 1266 /* Check if there is a port which is not started */ 1267 if (port->port_status != RTE_PORT_STARTED) 1268 return 0; 1269 } 1270 1271 /* No port is not started */ 1272 return 1; 1273 } 1274 1275 int 1276 start_port(portid_t pid) 1277 { 1278 int diag, need_check_link_status = 0; 1279 portid_t pi; 1280 queueid_t qi; 1281 struct rte_port *port; 1282 struct ether_addr mac_addr; 1283 1284 if (test_done == 0) { 1285 printf("Please stop forwarding first\n"); 1286 return -1; 1287 } 1288 1289 if (init_fwd_streams() < 0) { 1290 printf("Fail from init_fwd_streams()\n"); 1291 return -1; 1292 } 1293 1294 if(dcb_config) 1295 dcb_test = 1; 1296 for (pi = 0; pi < nb_ports; pi++) { 1297 if (pid < nb_ports && pid != pi) 1298 continue; 1299 1300 port = &ports[pi]; 1301 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1302 RTE_PORT_HANDLING) == 0) { 1303 printf("Port %d is now not stopped\n", pi); 1304 continue; 1305 } 1306 1307 if (port->need_reconfig > 0) { 1308 port->need_reconfig = 0; 1309 1310 printf("Configuring Port %d (socket %u)\n", pi, 1311 port->socket_id); 1312 /* configure port */ 1313 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1314 &(port->dev_conf)); 1315 if (diag != 0) { 1316 if (rte_atomic16_cmpset(&(port->port_status), 1317 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1318 printf("Port %d can not be set back " 1319 "to stopped\n", pi); 1320 printf("Fail to configure port %d\n", pi); 1321 /* try to reconfigure port next time */ 1322 port->need_reconfig = 1; 1323 return -1; 1324 } 1325 } 1326 if (port->need_reconfig_queues > 0) { 1327 port->need_reconfig_queues = 0; 1328 /* setup tx queues */ 1329 for (qi = 0; qi < nb_txq; qi++) { 1330 if ((numa_support) && 1331 (txring_numa[pi] != NUMA_NO_CONFIG)) 1332 diag = rte_eth_tx_queue_setup(pi, qi, 1333 nb_txd,txring_numa[pi], 1334 &(port->tx_conf)); 1335 else 1336 diag = rte_eth_tx_queue_setup(pi, qi, 1337 nb_txd,port->socket_id, 1338 &(port->tx_conf)); 1339 1340 if (diag == 0) 1341 continue; 1342 1343 /* Fail to setup tx queue, return */ 1344 if (rte_atomic16_cmpset(&(port->port_status), 1345 RTE_PORT_HANDLING, 1346 RTE_PORT_STOPPED) == 0) 1347 printf("Port %d can not be set back " 1348 "to stopped\n", pi); 1349 printf("Fail to configure port %d tx queues\n", pi); 1350 /* try to reconfigure queues next time */ 1351 port->need_reconfig_queues = 1; 1352 return -1; 1353 } 1354 /* setup rx queues */ 1355 for (qi = 0; qi < nb_rxq; qi++) { 1356 if ((numa_support) && 1357 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1358 struct rte_mempool * mp = 1359 mbuf_pool_find(rxring_numa[pi]); 1360 if (mp == NULL) { 1361 printf("Failed to setup RX queue:" 1362 "No mempool allocation" 1363 "on the socket %d\n", 1364 rxring_numa[pi]); 1365 return -1; 1366 } 1367 1368 diag = rte_eth_rx_queue_setup(pi, qi, 1369 nb_rxd,rxring_numa[pi], 1370 &(port->rx_conf),mp); 1371 } 1372 else 1373 diag = rte_eth_rx_queue_setup(pi, qi, 1374 nb_rxd,port->socket_id, 1375 &(port->rx_conf), 1376 mbuf_pool_find(port->socket_id)); 1377 1378 if (diag == 0) 1379 continue; 1380 1381 1382 /* Fail to setup rx queue, return */ 1383 if (rte_atomic16_cmpset(&(port->port_status), 1384 RTE_PORT_HANDLING, 1385 RTE_PORT_STOPPED) == 0) 1386 printf("Port %d can not be set back " 1387 "to stopped\n", pi); 1388 printf("Fail to configure port %d rx queues\n", pi); 1389 /* try to reconfigure queues next time */ 1390 port->need_reconfig_queues = 1; 1391 return -1; 1392 } 1393 } 1394 /* start port */ 1395 if (rte_eth_dev_start(pi) < 0) { 1396 printf("Fail to start port %d\n", pi); 1397 1398 /* Fail to setup rx queue, return */ 1399 if (rte_atomic16_cmpset(&(port->port_status), 1400 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1401 printf("Port %d can not be set back to " 1402 "stopped\n", pi); 1403 continue; 1404 } 1405 1406 if (rte_atomic16_cmpset(&(port->port_status), 1407 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1408 printf("Port %d can not be set into started\n", pi); 1409 1410 rte_eth_macaddr_get(pi, &mac_addr); 1411 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1412 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 1413 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 1414 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1415 1416 /* at least one port started, need checking link status */ 1417 need_check_link_status = 1; 1418 } 1419 1420 if (need_check_link_status && !no_link_check) 1421 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1422 else 1423 printf("Please stop the ports first\n"); 1424 1425 printf("Done\n"); 1426 return 0; 1427 } 1428 1429 void 1430 stop_port(portid_t pid) 1431 { 1432 portid_t pi; 1433 struct rte_port *port; 1434 int need_check_link_status = 0; 1435 1436 if (test_done == 0) { 1437 printf("Please stop forwarding first\n"); 1438 return; 1439 } 1440 if (dcb_test) { 1441 dcb_test = 0; 1442 dcb_config = 0; 1443 } 1444 printf("Stopping ports...\n"); 1445 1446 for (pi = 0; pi < nb_ports; pi++) { 1447 if (pid < nb_ports && pid != pi) 1448 continue; 1449 1450 port = &ports[pi]; 1451 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1452 RTE_PORT_HANDLING) == 0) 1453 continue; 1454 1455 rte_eth_dev_stop(pi); 1456 1457 if (rte_atomic16_cmpset(&(port->port_status), 1458 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1459 printf("Port %d can not be set into stopped\n", pi); 1460 need_check_link_status = 1; 1461 } 1462 if (need_check_link_status && !no_link_check) 1463 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1464 1465 printf("Done\n"); 1466 } 1467 1468 void 1469 close_port(portid_t pid) 1470 { 1471 portid_t pi; 1472 struct rte_port *port; 1473 1474 if (test_done == 0) { 1475 printf("Please stop forwarding first\n"); 1476 return; 1477 } 1478 1479 printf("Closing ports...\n"); 1480 1481 for (pi = 0; pi < nb_ports; pi++) { 1482 if (pid < nb_ports && pid != pi) 1483 continue; 1484 1485 port = &ports[pi]; 1486 if (rte_atomic16_cmpset(&(port->port_status), 1487 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1488 printf("Port %d is now not stopped\n", pi); 1489 continue; 1490 } 1491 1492 rte_eth_dev_close(pi); 1493 1494 if (rte_atomic16_cmpset(&(port->port_status), 1495 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1496 printf("Port %d can not be set into stopped\n", pi); 1497 } 1498 1499 printf("Done\n"); 1500 } 1501 1502 int 1503 all_ports_stopped(void) 1504 { 1505 portid_t pi; 1506 struct rte_port *port; 1507 1508 for (pi = 0; pi < nb_ports; pi++) { 1509 port = &ports[pi]; 1510 if (port->port_status != RTE_PORT_STOPPED) 1511 return 0; 1512 } 1513 1514 return 1; 1515 } 1516 1517 int 1518 port_is_started(portid_t port_id) 1519 { 1520 if (port_id_is_invalid(port_id)) 1521 return -1; 1522 1523 if (ports[port_id].port_status != RTE_PORT_STARTED) 1524 return 0; 1525 1526 return 1; 1527 } 1528 1529 void 1530 pmd_test_exit(void) 1531 { 1532 portid_t pt_id; 1533 1534 for (pt_id = 0; pt_id < nb_ports; pt_id++) { 1535 printf("Stopping port %d...", pt_id); 1536 fflush(stdout); 1537 rte_eth_dev_close(pt_id); 1538 printf("done\n"); 1539 } 1540 printf("bye...\n"); 1541 } 1542 1543 typedef void (*cmd_func_t)(void); 1544 struct pmd_test_command { 1545 const char *cmd_name; 1546 cmd_func_t cmd_func; 1547 }; 1548 1549 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1550 1551 /* Check the link status of all ports in up to 9s, and print them finally */ 1552 static void 1553 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 1554 { 1555 #define CHECK_INTERVAL 100 /* 100ms */ 1556 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1557 uint8_t portid, count, all_ports_up, print_flag = 0; 1558 struct rte_eth_link link; 1559 1560 printf("Checking link statuses...\n"); 1561 fflush(stdout); 1562 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1563 all_ports_up = 1; 1564 for (portid = 0; portid < port_num; portid++) { 1565 if ((port_mask & (1 << portid)) == 0) 1566 continue; 1567 memset(&link, 0, sizeof(link)); 1568 rte_eth_link_get_nowait(portid, &link); 1569 /* print link status if flag set */ 1570 if (print_flag == 1) { 1571 if (link.link_status) 1572 printf("Port %d Link Up - speed %u " 1573 "Mbps - %s\n", (uint8_t)portid, 1574 (unsigned)link.link_speed, 1575 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1576 ("full-duplex") : ("half-duplex\n")); 1577 else 1578 printf("Port %d Link Down\n", 1579 (uint8_t)portid); 1580 continue; 1581 } 1582 /* clear all_ports_up flag if any link down */ 1583 if (link.link_status == 0) { 1584 all_ports_up = 0; 1585 break; 1586 } 1587 } 1588 /* after finally printing all link status, get out */ 1589 if (print_flag == 1) 1590 break; 1591 1592 if (all_ports_up == 0) { 1593 fflush(stdout); 1594 rte_delay_ms(CHECK_INTERVAL); 1595 } 1596 1597 /* set the print_flag if all ports up or timeout */ 1598 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1599 print_flag = 1; 1600 } 1601 } 1602 } 1603 1604 static int 1605 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1606 { 1607 uint16_t i; 1608 int diag; 1609 uint8_t mapping_found = 0; 1610 1611 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1612 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1613 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1614 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1615 tx_queue_stats_mappings[i].queue_id, 1616 tx_queue_stats_mappings[i].stats_counter_id); 1617 if (diag != 0) 1618 return diag; 1619 mapping_found = 1; 1620 } 1621 } 1622 if (mapping_found) 1623 port->tx_queue_stats_mapping_enabled = 1; 1624 return 0; 1625 } 1626 1627 static int 1628 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1629 { 1630 uint16_t i; 1631 int diag; 1632 uint8_t mapping_found = 0; 1633 1634 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1635 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1636 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1637 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1638 rx_queue_stats_mappings[i].queue_id, 1639 rx_queue_stats_mappings[i].stats_counter_id); 1640 if (diag != 0) 1641 return diag; 1642 mapping_found = 1; 1643 } 1644 } 1645 if (mapping_found) 1646 port->rx_queue_stats_mapping_enabled = 1; 1647 return 0; 1648 } 1649 1650 static void 1651 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1652 { 1653 int diag = 0; 1654 1655 diag = set_tx_queue_stats_mapping_registers(pi, port); 1656 if (diag != 0) { 1657 if (diag == -ENOTSUP) { 1658 port->tx_queue_stats_mapping_enabled = 0; 1659 printf("TX queue stats mapping not supported port id=%d\n", pi); 1660 } 1661 else 1662 rte_exit(EXIT_FAILURE, 1663 "set_tx_queue_stats_mapping_registers " 1664 "failed for port id=%d diag=%d\n", 1665 pi, diag); 1666 } 1667 1668 diag = set_rx_queue_stats_mapping_registers(pi, port); 1669 if (diag != 0) { 1670 if (diag == -ENOTSUP) { 1671 port->rx_queue_stats_mapping_enabled = 0; 1672 printf("RX queue stats mapping not supported port id=%d\n", pi); 1673 } 1674 else 1675 rte_exit(EXIT_FAILURE, 1676 "set_rx_queue_stats_mapping_registers " 1677 "failed for port id=%d diag=%d\n", 1678 pi, diag); 1679 } 1680 } 1681 1682 void 1683 init_port_config(void) 1684 { 1685 portid_t pid; 1686 struct rte_port *port; 1687 1688 for (pid = 0; pid < nb_ports; pid++) { 1689 port = &ports[pid]; 1690 port->dev_conf.rxmode = rx_mode; 1691 port->dev_conf.fdir_conf = fdir_conf; 1692 if (nb_rxq > 1) { 1693 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1694 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1695 } else { 1696 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1697 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1698 } 1699 1700 /* In SR-IOV mode, RSS mode is not available */ 1701 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1702 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1703 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1704 else 1705 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1706 } 1707 1708 port->rx_conf.rx_thresh = rx_thresh; 1709 port->rx_conf.rx_free_thresh = rx_free_thresh; 1710 port->rx_conf.rx_drop_en = rx_drop_en; 1711 port->tx_conf.tx_thresh = tx_thresh; 1712 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1713 port->tx_conf.tx_free_thresh = tx_free_thresh; 1714 port->tx_conf.txq_flags = txq_flags; 1715 1716 rte_eth_macaddr_get(pid, &port->eth_addr); 1717 1718 map_port_queue_stats_mapping_registers(pid, port); 1719 #ifdef RTE_NIC_BYPASS 1720 rte_eth_dev_bypass_init(pid); 1721 #endif 1722 } 1723 } 1724 1725 const uint16_t vlan_tags[] = { 1726 0, 1, 2, 3, 4, 5, 6, 7, 1727 8, 9, 10, 11, 12, 13, 14, 15, 1728 16, 17, 18, 19, 20, 21, 22, 23, 1729 24, 25, 26, 27, 28, 29, 30, 31 1730 }; 1731 1732 static int 1733 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1734 { 1735 uint8_t i; 1736 1737 /* 1738 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1739 * given above, and the number of traffic classes available for use. 1740 */ 1741 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1742 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1743 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1744 1745 /* VMDQ+DCB RX and TX configrations */ 1746 vmdq_rx_conf.enable_default_pool = 0; 1747 vmdq_rx_conf.default_pool = 0; 1748 vmdq_rx_conf.nb_queue_pools = 1749 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1750 vmdq_tx_conf.nb_queue_pools = 1751 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1752 1753 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1754 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1755 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1756 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1757 } 1758 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1759 vmdq_rx_conf.dcb_queue[i] = i; 1760 vmdq_tx_conf.dcb_queue[i] = i; 1761 } 1762 1763 /*set DCB mode of RX and TX of multiple queues*/ 1764 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1765 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1766 if (dcb_conf->pfc_en) 1767 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1768 else 1769 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1770 1771 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1772 sizeof(struct rte_eth_vmdq_dcb_conf))); 1773 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1774 sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1775 } 1776 else { 1777 struct rte_eth_dcb_rx_conf rx_conf; 1778 struct rte_eth_dcb_tx_conf tx_conf; 1779 1780 /* queue mapping configuration of DCB RX and TX */ 1781 if (dcb_conf->num_tcs == ETH_4_TCS) 1782 dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1783 else 1784 dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1785 1786 rx_conf.nb_tcs = dcb_conf->num_tcs; 1787 tx_conf.nb_tcs = dcb_conf->num_tcs; 1788 1789 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1790 rx_conf.dcb_queue[i] = i; 1791 tx_conf.dcb_queue[i] = i; 1792 } 1793 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 1794 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1795 if (dcb_conf->pfc_en) 1796 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1797 else 1798 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1799 1800 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1801 sizeof(struct rte_eth_dcb_rx_conf))); 1802 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1803 sizeof(struct rte_eth_dcb_tx_conf))); 1804 } 1805 1806 return 0; 1807 } 1808 1809 int 1810 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1811 { 1812 struct rte_eth_conf port_conf; 1813 struct rte_port *rte_port; 1814 int retval; 1815 uint16_t nb_vlan; 1816 uint16_t i; 1817 1818 /* rxq and txq configuration in dcb mode */ 1819 nb_rxq = 128; 1820 nb_txq = 128; 1821 rx_free_thresh = 64; 1822 1823 memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1824 /* Enter DCB configuration status */ 1825 dcb_config = 1; 1826 1827 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1828 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1829 retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1830 if (retval < 0) 1831 return retval; 1832 1833 rte_port = &ports[pid]; 1834 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1835 1836 rte_port->rx_conf.rx_thresh = rx_thresh; 1837 rte_port->rx_conf.rx_free_thresh = rx_free_thresh; 1838 rte_port->tx_conf.tx_thresh = tx_thresh; 1839 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1840 rte_port->tx_conf.tx_free_thresh = tx_free_thresh; 1841 /* VLAN filter */ 1842 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1843 for (i = 0; i < nb_vlan; i++){ 1844 rx_vft_set(pid, vlan_tags[i], 1); 1845 } 1846 1847 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1848 map_port_queue_stats_mapping_registers(pid, rte_port); 1849 1850 rte_port->dcb_flag = 1; 1851 1852 return 0; 1853 } 1854 1855 #ifdef RTE_EXEC_ENV_BAREMETAL 1856 #define main _main 1857 #endif 1858 1859 int 1860 main(int argc, char** argv) 1861 { 1862 int diag; 1863 uint8_t port_id; 1864 1865 diag = rte_eal_init(argc, argv); 1866 if (diag < 0) 1867 rte_panic("Cannot init EAL\n"); 1868 1869 nb_ports = (portid_t) rte_eth_dev_count(); 1870 if (nb_ports == 0) 1871 rte_exit(EXIT_FAILURE, "No probed ethernet devices - " 1872 "check that " 1873 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that " 1874 "CONFIG_RTE_LIBRTE_EM_PMD=y and that " 1875 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your " 1876 "configuration file\n"); 1877 1878 set_def_fwd_config(); 1879 if (nb_lcores == 0) 1880 rte_panic("Empty set of forwarding logical cores - check the " 1881 "core mask supplied in the command parameters\n"); 1882 1883 argc -= diag; 1884 argv += diag; 1885 if (argc > 1) 1886 launch_args_parse(argc, argv); 1887 1888 if (nb_rxq > nb_txq) 1889 printf("Warning: nb_rxq=%d enables RSS configuration, " 1890 "but nb_txq=%d will prevent to fully test it.\n", 1891 nb_rxq, nb_txq); 1892 1893 init_config(); 1894 if (start_port(RTE_PORT_ALL) != 0) 1895 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 1896 1897 /* set all ports to promiscuous mode by default */ 1898 for (port_id = 0; port_id < nb_ports; port_id++) 1899 rte_eth_promiscuous_enable(port_id); 1900 1901 #ifdef RTE_LIBRTE_CMDLINE 1902 if (interactive == 1) { 1903 if (auto_start) { 1904 printf("Start automatic packet forwarding\n"); 1905 start_packet_forwarding(0); 1906 } 1907 prompt(); 1908 } else 1909 #endif 1910 { 1911 char c; 1912 int rc; 1913 1914 printf("No commandline core given, start packet forwarding\n"); 1915 start_packet_forwarding(0); 1916 printf("Press enter to exit\n"); 1917 rc = read(0, &c, 1); 1918 if (rc < 0) 1919 return 1; 1920 } 1921 1922 return 0; 1923 } 1924