1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stdarg.h> 35 #include <stdio.h> 36 #include <stdlib.h> 37 #include <signal.h> 38 #include <string.h> 39 #include <time.h> 40 #include <fcntl.h> 41 #include <sys/types.h> 42 #include <errno.h> 43 44 #include <sys/queue.h> 45 #include <sys/stat.h> 46 47 #include <stdint.h> 48 #include <unistd.h> 49 #include <inttypes.h> 50 51 #include <rte_common.h> 52 #include <rte_byteorder.h> 53 #include <rte_log.h> 54 #include <rte_debug.h> 55 #include <rte_cycles.h> 56 #include <rte_memory.h> 57 #include <rte_memcpy.h> 58 #include <rte_memzone.h> 59 #include <rte_launch.h> 60 #include <rte_tailq.h> 61 #include <rte_eal.h> 62 #include <rte_per_lcore.h> 63 #include <rte_lcore.h> 64 #include <rte_atomic.h> 65 #include <rte_branch_prediction.h> 66 #include <rte_ring.h> 67 #include <rte_mempool.h> 68 #include <rte_malloc.h> 69 #include <rte_mbuf.h> 70 #include <rte_interrupts.h> 71 #include <rte_pci.h> 72 #include <rte_ether.h> 73 #include <rte_ethdev.h> 74 #include <rte_string_fns.h> 75 #ifdef RTE_LIBRTE_PMD_XENVIRT 76 #include <rte_eth_xenvirt.h> 77 #endif 78 79 #include "testpmd.h" 80 #include "mempool_osdep.h" 81 82 uint16_t verbose_level = 0; /**< Silent by default. */ 83 84 /* use master core for command line ? */ 85 uint8_t interactive = 0; 86 uint8_t auto_start = 0; 87 88 /* 89 * NUMA support configuration. 90 * When set, the NUMA support attempts to dispatch the allocation of the 91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92 * probed ports among the CPU sockets 0 and 1. 93 * Otherwise, all memory is allocated from CPU socket 0. 94 */ 95 uint8_t numa_support = 0; /**< No numa support by default */ 96 97 /* 98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99 * not configured. 100 */ 101 uint8_t socket_num = UMA_NO_CONFIG; 102 103 /* 104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105 */ 106 uint8_t mp_anon = 0; 107 108 /* 109 * Record the Ethernet address of peer target ports to which packets are 110 * forwarded. 111 * Must be instanciated with the ethernet addresses of peer traffic generator 112 * ports. 113 */ 114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115 portid_t nb_peer_eth_addrs = 0; 116 117 /* 118 * Probed Target Environment. 119 */ 120 struct rte_port *ports; /**< For all probed ethernet ports. */ 121 portid_t nb_ports; /**< Number of probed ethernet ports. */ 122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124 125 /* 126 * Test Forwarding Configuration. 127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129 */ 130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132 portid_t nb_cfg_ports; /**< Number of configured ports. */ 133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134 135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137 138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140 141 /* 142 * Forwarding engines. 143 */ 144 struct fwd_engine * fwd_engines[] = { 145 &io_fwd_engine, 146 &mac_fwd_engine, 147 &mac_retry_fwd_engine, 148 &mac_swap_engine, 149 &flow_gen_engine, 150 &rx_only_engine, 151 &tx_only_engine, 152 &csum_fwd_engine, 153 &icmp_echo_engine, 154 #ifdef RTE_LIBRTE_IEEE1588 155 &ieee1588_fwd_engine, 156 #endif 157 NULL, 158 }; 159 160 struct fwd_config cur_fwd_config; 161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162 163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165 * specified on command-line. */ 166 167 /* 168 * Configuration of packet segments used by the "txonly" processing engine. 169 */ 170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172 TXONLY_DEF_PACKET_LEN, 173 }; 174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175 176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */ 178 179 /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180 uint8_t dcb_config = 0; 181 182 /* Whether the dcb is in testing status */ 183 uint8_t dcb_test = 0; 184 185 /* DCB on and VT on mapping is default */ 186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187 188 /* 189 * Configurable number of RX/TX queues. 190 */ 191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193 194 /* 195 * Configurable number of RX/TX ring descriptors. 196 */ 197 #define RTE_TEST_RX_DESC_DEFAULT 128 198 #define RTE_TEST_TX_DESC_DEFAULT 512 199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201 202 /* 203 * Configurable values of RX and TX ring threshold registers. 204 */ 205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ 206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ 207 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */ 208 209 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */ 210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ 211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ 212 213 struct rte_eth_thresh rx_thresh = { 214 .pthresh = RX_PTHRESH, 215 .hthresh = RX_HTHRESH, 216 .wthresh = RX_WTHRESH, 217 }; 218 219 struct rte_eth_thresh tx_thresh = { 220 .pthresh = TX_PTHRESH, 221 .hthresh = TX_HTHRESH, 222 .wthresh = TX_WTHRESH, 223 }; 224 225 /* 226 * Configurable value of RX free threshold. 227 */ 228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */ 229 230 /* 231 * Configurable value of RX drop enable. 232 */ 233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */ 234 235 /* 236 * Configurable value of TX free threshold. 237 */ 238 uint16_t tx_free_thresh = 0; /* Use default values. */ 239 240 /* 241 * Configurable value of TX RS bit threshold. 242 */ 243 uint16_t tx_rs_thresh = 0; /* Use default values. */ 244 245 /* 246 * Configurable value of TX queue flags. 247 */ 248 uint32_t txq_flags = 0; /* No flags set. */ 249 250 /* 251 * Receive Side Scaling (RSS) configuration. 252 */ 253 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */ 254 255 /* 256 * Port topology configuration 257 */ 258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 259 260 /* 261 * Avoids to flush all the RX streams before starts forwarding. 262 */ 263 uint8_t no_flush_rx = 0; /* flush by default */ 264 265 /* 266 * Avoids to check link status when starting/stopping a port. 267 */ 268 uint8_t no_link_check = 0; /* check by default */ 269 270 /* 271 * NIC bypass mode configuration options. 272 */ 273 #ifdef RTE_NIC_BYPASS 274 275 /* The NIC bypass watchdog timeout. */ 276 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 277 278 #endif 279 280 /* 281 * Ethernet device configuration. 282 */ 283 struct rte_eth_rxmode rx_mode = { 284 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 285 .split_hdr_size = 0, 286 .header_split = 0, /**< Header Split disabled. */ 287 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 288 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 289 .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 290 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 291 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 292 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 293 }; 294 295 struct rte_fdir_conf fdir_conf = { 296 .mode = RTE_FDIR_MODE_NONE, 297 .pballoc = RTE_FDIR_PBALLOC_64K, 298 .status = RTE_FDIR_REPORT_STATUS, 299 .flexbytes_offset = 0x6, 300 .drop_queue = 127, 301 }; 302 303 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 304 305 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 306 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 307 308 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 309 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 310 311 uint16_t nb_tx_queue_stats_mappings = 0; 312 uint16_t nb_rx_queue_stats_mappings = 0; 313 314 /* Forward function declarations */ 315 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 316 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask); 317 318 /* 319 * Check if all the ports are started. 320 * If yes, return positive value. If not, return zero. 321 */ 322 static int all_ports_started(void); 323 324 /* 325 * Setup default configuration. 326 */ 327 static void 328 set_default_fwd_lcores_config(void) 329 { 330 unsigned int i; 331 unsigned int nb_lc; 332 333 nb_lc = 0; 334 for (i = 0; i < RTE_MAX_LCORE; i++) { 335 if (! rte_lcore_is_enabled(i)) 336 continue; 337 if (i == rte_get_master_lcore()) 338 continue; 339 fwd_lcores_cpuids[nb_lc++] = i; 340 } 341 nb_lcores = (lcoreid_t) nb_lc; 342 nb_cfg_lcores = nb_lcores; 343 nb_fwd_lcores = 1; 344 } 345 346 static void 347 set_def_peer_eth_addrs(void) 348 { 349 portid_t i; 350 351 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 352 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 353 peer_eth_addrs[i].addr_bytes[5] = i; 354 } 355 } 356 357 static void 358 set_default_fwd_ports_config(void) 359 { 360 portid_t pt_id; 361 362 for (pt_id = 0; pt_id < nb_ports; pt_id++) 363 fwd_ports_ids[pt_id] = pt_id; 364 365 nb_cfg_ports = nb_ports; 366 nb_fwd_ports = nb_ports; 367 } 368 369 void 370 set_def_fwd_config(void) 371 { 372 set_default_fwd_lcores_config(); 373 set_def_peer_eth_addrs(); 374 set_default_fwd_ports_config(); 375 } 376 377 /* 378 * Configuration initialisation done once at init time. 379 */ 380 struct mbuf_ctor_arg { 381 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 382 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 383 }; 384 385 struct mbuf_pool_ctor_arg { 386 uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 387 }; 388 389 static void 390 testpmd_mbuf_ctor(struct rte_mempool *mp, 391 void *opaque_arg, 392 void *raw_mbuf, 393 __attribute__((unused)) unsigned i) 394 { 395 struct mbuf_ctor_arg *mb_ctor_arg; 396 struct rte_mbuf *mb; 397 398 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 399 mb = (struct rte_mbuf *) raw_mbuf; 400 401 mb->type = RTE_MBUF_PKT; 402 mb->pool = mp; 403 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 404 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 405 mb_ctor_arg->seg_buf_offset); 406 mb->buf_len = mb_ctor_arg->seg_buf_size; 407 mb->type = RTE_MBUF_PKT; 408 mb->ol_flags = 0; 409 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM; 410 mb->pkt.nb_segs = 1; 411 mb->pkt.vlan_macip.data = 0; 412 mb->pkt.hash.rss = 0; 413 } 414 415 static void 416 testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 417 void *opaque_arg) 418 { 419 struct mbuf_pool_ctor_arg *mbp_ctor_arg; 420 struct rte_pktmbuf_pool_private *mbp_priv; 421 422 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 423 printf("%s(%s) private_data_size %d < %d\n", 424 __func__, mp->name, (int) mp->private_data_size, 425 (int) sizeof(struct rte_pktmbuf_pool_private)); 426 return; 427 } 428 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 429 mbp_priv = rte_mempool_get_priv(mp); 430 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 431 } 432 433 static void 434 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 435 unsigned int socket_id) 436 { 437 char pool_name[RTE_MEMPOOL_NAMESIZE]; 438 struct rte_mempool *rte_mp; 439 struct mbuf_pool_ctor_arg mbp_ctor_arg; 440 struct mbuf_ctor_arg mb_ctor_arg; 441 uint32_t mb_size; 442 443 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 444 mbuf_seg_size); 445 mb_ctor_arg.seg_buf_offset = 446 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 447 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 448 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 449 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 450 451 #ifdef RTE_LIBRTE_PMD_XENVIRT 452 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 453 (unsigned) mb_mempool_cache, 454 sizeof(struct rte_pktmbuf_pool_private), 455 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 456 testpmd_mbuf_ctor, &mb_ctor_arg, 457 socket_id, 0); 458 459 460 461 #else 462 if (mp_anon != 0) 463 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 464 (unsigned) mb_mempool_cache, 465 sizeof(struct rte_pktmbuf_pool_private), 466 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 467 testpmd_mbuf_ctor, &mb_ctor_arg, 468 socket_id, 0); 469 else 470 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 471 (unsigned) mb_mempool_cache, 472 sizeof(struct rte_pktmbuf_pool_private), 473 testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 474 testpmd_mbuf_ctor, &mb_ctor_arg, 475 socket_id, 0); 476 477 #endif 478 479 if (rte_mp == NULL) { 480 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 481 "failed\n", socket_id); 482 } else if (verbose_level > 0) { 483 rte_mempool_dump(stdout, rte_mp); 484 } 485 } 486 487 /* 488 * Check given socket id is valid or not with NUMA mode, 489 * if valid, return 0, else return -1 490 */ 491 static int 492 check_socket_id(const unsigned int socket_id) 493 { 494 static int warning_once = 0; 495 496 if (socket_id >= MAX_SOCKET) { 497 if (!warning_once && numa_support) 498 printf("Warning: NUMA should be configured manually by" 499 " using --port-numa-config and" 500 " --ring-numa-config parameters along with" 501 " --numa.\n"); 502 warning_once = 1; 503 return -1; 504 } 505 return 0; 506 } 507 508 static void 509 init_config(void) 510 { 511 portid_t pid; 512 struct rte_port *port; 513 struct rte_mempool *mbp; 514 unsigned int nb_mbuf_per_pool; 515 lcoreid_t lc_id; 516 uint8_t port_per_socket[MAX_SOCKET]; 517 518 memset(port_per_socket,0,MAX_SOCKET); 519 /* Configuration of logical cores. */ 520 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 521 sizeof(struct fwd_lcore *) * nb_lcores, 522 CACHE_LINE_SIZE); 523 if (fwd_lcores == NULL) { 524 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 525 "failed\n", nb_lcores); 526 } 527 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 528 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 529 sizeof(struct fwd_lcore), 530 CACHE_LINE_SIZE); 531 if (fwd_lcores[lc_id] == NULL) { 532 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 533 "failed\n"); 534 } 535 fwd_lcores[lc_id]->cpuid_idx = lc_id; 536 } 537 538 /* 539 * Create pools of mbuf. 540 * If NUMA support is disabled, create a single pool of mbuf in 541 * socket 0 memory by default. 542 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 543 * 544 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 545 * nb_txd can be configured at run time. 546 */ 547 if (param_total_num_mbufs) 548 nb_mbuf_per_pool = param_total_num_mbufs; 549 else { 550 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 551 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 552 553 if (!numa_support) 554 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); 555 } 556 557 if (!numa_support) { 558 if (socket_num == UMA_NO_CONFIG) 559 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 560 else 561 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 562 socket_num); 563 } 564 565 /* Configuration of Ethernet ports. */ 566 ports = rte_zmalloc("testpmd: ports", 567 sizeof(struct rte_port) * nb_ports, 568 CACHE_LINE_SIZE); 569 if (ports == NULL) { 570 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) " 571 "failed\n", nb_ports); 572 } 573 574 for (pid = 0; pid < nb_ports; pid++) { 575 port = &ports[pid]; 576 rte_eth_dev_info_get(pid, &port->dev_info); 577 578 if (numa_support) { 579 if (port_numa[pid] != NUMA_NO_CONFIG) 580 port_per_socket[port_numa[pid]]++; 581 else { 582 uint32_t socket_id = rte_eth_dev_socket_id(pid); 583 584 /* if socket_id is invalid, set to 0 */ 585 if (check_socket_id(socket_id) < 0) 586 socket_id = 0; 587 port_per_socket[socket_id]++; 588 } 589 } 590 591 /* set flag to initialize port/queue */ 592 port->need_reconfig = 1; 593 port->need_reconfig_queues = 1; 594 } 595 596 if (numa_support) { 597 uint8_t i; 598 unsigned int nb_mbuf; 599 600 if (param_total_num_mbufs) 601 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 602 603 for (i = 0; i < MAX_SOCKET; i++) { 604 nb_mbuf = (nb_mbuf_per_pool * 605 port_per_socket[i]); 606 if (nb_mbuf) 607 mbuf_pool_create(mbuf_data_size, 608 nb_mbuf,i); 609 } 610 } 611 init_port_config(); 612 613 /* 614 * Records which Mbuf pool to use by each logical core, if needed. 615 */ 616 for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 617 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id)); 618 if (mbp == NULL) 619 mbp = mbuf_pool_find(0); 620 fwd_lcores[lc_id]->mbp = mbp; 621 } 622 623 /* Configuration of packet forwarding streams. */ 624 if (init_fwd_streams() < 0) 625 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 626 } 627 628 int 629 init_fwd_streams(void) 630 { 631 portid_t pid; 632 struct rte_port *port; 633 streamid_t sm_id, nb_fwd_streams_new; 634 635 /* set socket id according to numa or not */ 636 for (pid = 0; pid < nb_ports; pid++) { 637 port = &ports[pid]; 638 if (nb_rxq > port->dev_info.max_rx_queues) { 639 printf("Fail: nb_rxq(%d) is greater than " 640 "max_rx_queues(%d)\n", nb_rxq, 641 port->dev_info.max_rx_queues); 642 return -1; 643 } 644 if (nb_txq > port->dev_info.max_tx_queues) { 645 printf("Fail: nb_txq(%d) is greater than " 646 "max_tx_queues(%d)\n", nb_txq, 647 port->dev_info.max_tx_queues); 648 return -1; 649 } 650 if (numa_support) { 651 if (port_numa[pid] != NUMA_NO_CONFIG) 652 port->socket_id = port_numa[pid]; 653 else { 654 port->socket_id = rte_eth_dev_socket_id(pid); 655 656 /* if socket_id is invalid, set to 0 */ 657 if (check_socket_id(port->socket_id) < 0) 658 port->socket_id = 0; 659 } 660 } 661 else { 662 if (socket_num == UMA_NO_CONFIG) 663 port->socket_id = 0; 664 else 665 port->socket_id = socket_num; 666 } 667 } 668 669 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 670 if (nb_fwd_streams_new == nb_fwd_streams) 671 return 0; 672 /* clear the old */ 673 if (fwd_streams != NULL) { 674 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 675 if (fwd_streams[sm_id] == NULL) 676 continue; 677 rte_free(fwd_streams[sm_id]); 678 fwd_streams[sm_id] = NULL; 679 } 680 rte_free(fwd_streams); 681 fwd_streams = NULL; 682 } 683 684 /* init new */ 685 nb_fwd_streams = nb_fwd_streams_new; 686 fwd_streams = rte_zmalloc("testpmd: fwd_streams", 687 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE); 688 if (fwd_streams == NULL) 689 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 690 "failed\n", nb_fwd_streams); 691 692 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 693 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 694 sizeof(struct fwd_stream), CACHE_LINE_SIZE); 695 if (fwd_streams[sm_id] == NULL) 696 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 697 " failed\n"); 698 } 699 700 return 0; 701 } 702 703 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 704 static void 705 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 706 { 707 unsigned int total_burst; 708 unsigned int nb_burst; 709 unsigned int burst_stats[3]; 710 uint16_t pktnb_stats[3]; 711 uint16_t nb_pkt; 712 int burst_percent[3]; 713 714 /* 715 * First compute the total number of packet bursts and the 716 * two highest numbers of bursts of the same number of packets. 717 */ 718 total_burst = 0; 719 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 720 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 721 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 722 nb_burst = pbs->pkt_burst_spread[nb_pkt]; 723 if (nb_burst == 0) 724 continue; 725 total_burst += nb_burst; 726 if (nb_burst > burst_stats[0]) { 727 burst_stats[1] = burst_stats[0]; 728 pktnb_stats[1] = pktnb_stats[0]; 729 burst_stats[0] = nb_burst; 730 pktnb_stats[0] = nb_pkt; 731 } 732 } 733 if (total_burst == 0) 734 return; 735 burst_percent[0] = (burst_stats[0] * 100) / total_burst; 736 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 737 burst_percent[0], (int) pktnb_stats[0]); 738 if (burst_stats[0] == total_burst) { 739 printf("]\n"); 740 return; 741 } 742 if (burst_stats[0] + burst_stats[1] == total_burst) { 743 printf(" + %d%% of %d pkts]\n", 744 100 - burst_percent[0], pktnb_stats[1]); 745 return; 746 } 747 burst_percent[1] = (burst_stats[1] * 100) / total_burst; 748 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 749 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 750 printf(" + %d%% of others]\n", 100 - burst_percent[0]); 751 return; 752 } 753 printf(" + %d%% of %d pkts + %d%% of others]\n", 754 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 755 } 756 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 757 758 static void 759 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 760 { 761 struct rte_port *port; 762 uint8_t i; 763 764 static const char *fwd_stats_border = "----------------------"; 765 766 port = &ports[port_id]; 767 printf("\n %s Forward statistics for port %-2d %s\n", 768 fwd_stats_border, port_id, fwd_stats_border); 769 770 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 771 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 772 "%-"PRIu64"\n", 773 stats->ipackets, stats->ierrors, 774 (uint64_t) (stats->ipackets + stats->ierrors)); 775 776 if (cur_fwd_eng == &csum_fwd_engine) 777 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 778 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 779 780 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 781 "%-"PRIu64"\n", 782 stats->opackets, port->tx_dropped, 783 (uint64_t) (stats->opackets + port->tx_dropped)); 784 785 if (stats->rx_nombuf > 0) 786 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 787 788 } 789 else { 790 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 791 "%14"PRIu64"\n", 792 stats->ipackets, stats->ierrors, 793 (uint64_t) (stats->ipackets + stats->ierrors)); 794 795 if (cur_fwd_eng == &csum_fwd_engine) 796 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 797 port->rx_bad_ip_csum, port->rx_bad_l4_csum); 798 799 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 800 "%14"PRIu64"\n", 801 stats->opackets, port->tx_dropped, 802 (uint64_t) (stats->opackets + port->tx_dropped)); 803 804 if (stats->rx_nombuf > 0) 805 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf); 806 } 807 808 /* Display statistics of XON/XOFF pause frames, if any. */ 809 if ((stats->tx_pause_xon | stats->rx_pause_xon | 810 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 811 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 812 stats->rx_pause_xoff, stats->rx_pause_xon); 813 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 814 stats->tx_pause_xoff, stats->tx_pause_xon); 815 } 816 817 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 818 if (port->rx_stream) 819 pkt_burst_stats_display("RX", 820 &port->rx_stream->rx_burst_stats); 821 if (port->tx_stream) 822 pkt_burst_stats_display("TX", 823 &port->tx_stream->tx_burst_stats); 824 #endif 825 /* stats fdir */ 826 if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 827 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 828 stats->fdirmiss, 829 stats->fdirmatch); 830 831 if (port->rx_queue_stats_mapping_enabled) { 832 printf("\n"); 833 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 834 printf(" Stats reg %2d RX-packets:%14"PRIu64 835 " RX-errors:%14"PRIu64 836 " RX-bytes:%14"PRIu64"\n", 837 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 838 } 839 printf("\n"); 840 } 841 if (port->tx_queue_stats_mapping_enabled) { 842 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 843 printf(" Stats reg %2d TX-packets:%14"PRIu64 844 " TX-bytes:%14"PRIu64"\n", 845 i, stats->q_opackets[i], stats->q_obytes[i]); 846 } 847 } 848 849 printf(" %s--------------------------------%s\n", 850 fwd_stats_border, fwd_stats_border); 851 } 852 853 static void 854 fwd_stream_stats_display(streamid_t stream_id) 855 { 856 struct fwd_stream *fs; 857 static const char *fwd_top_stats_border = "-------"; 858 859 fs = fwd_streams[stream_id]; 860 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 861 (fs->fwd_dropped == 0)) 862 return; 863 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 864 "TX Port=%2d/Queue=%2d %s\n", 865 fwd_top_stats_border, fs->rx_port, fs->rx_queue, 866 fs->tx_port, fs->tx_queue, fwd_top_stats_border); 867 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 868 fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 869 870 /* if checksum mode */ 871 if (cur_fwd_eng == &csum_fwd_engine) { 872 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 873 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 874 } 875 876 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 877 pkt_burst_stats_display("RX", &fs->rx_burst_stats); 878 pkt_burst_stats_display("TX", &fs->tx_burst_stats); 879 #endif 880 } 881 882 static void 883 flush_fwd_rx_queues(void) 884 { 885 struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 886 portid_t rxp; 887 portid_t port_id; 888 queueid_t rxq; 889 uint16_t nb_rx; 890 uint16_t i; 891 uint8_t j; 892 893 for (j = 0; j < 2; j++) { 894 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 895 for (rxq = 0; rxq < nb_rxq; rxq++) { 896 port_id = fwd_ports_ids[rxp]; 897 do { 898 nb_rx = rte_eth_rx_burst(port_id, rxq, 899 pkts_burst, MAX_PKT_BURST); 900 for (i = 0; i < nb_rx; i++) 901 rte_pktmbuf_free(pkts_burst[i]); 902 } while (nb_rx > 0); 903 } 904 } 905 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 906 } 907 } 908 909 static void 910 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 911 { 912 struct fwd_stream **fsm; 913 streamid_t nb_fs; 914 streamid_t sm_id; 915 916 fsm = &fwd_streams[fc->stream_idx]; 917 nb_fs = fc->stream_nb; 918 do { 919 for (sm_id = 0; sm_id < nb_fs; sm_id++) 920 (*pkt_fwd)(fsm[sm_id]); 921 } while (! fc->stopped); 922 } 923 924 static int 925 start_pkt_forward_on_core(void *fwd_arg) 926 { 927 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 928 cur_fwd_config.fwd_eng->packet_fwd); 929 return 0; 930 } 931 932 /* 933 * Run the TXONLY packet forwarding engine to send a single burst of packets. 934 * Used to start communication flows in network loopback test configurations. 935 */ 936 static int 937 run_one_txonly_burst_on_core(void *fwd_arg) 938 { 939 struct fwd_lcore *fwd_lc; 940 struct fwd_lcore tmp_lcore; 941 942 fwd_lc = (struct fwd_lcore *) fwd_arg; 943 tmp_lcore = *fwd_lc; 944 tmp_lcore.stopped = 1; 945 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 946 return 0; 947 } 948 949 /* 950 * Launch packet forwarding: 951 * - Setup per-port forwarding context. 952 * - launch logical cores with their forwarding configuration. 953 */ 954 static void 955 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 956 { 957 port_fwd_begin_t port_fwd_begin; 958 unsigned int i; 959 unsigned int lc_id; 960 int diag; 961 962 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 963 if (port_fwd_begin != NULL) { 964 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 965 (*port_fwd_begin)(fwd_ports_ids[i]); 966 } 967 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 968 lc_id = fwd_lcores_cpuids[i]; 969 if ((interactive == 0) || (lc_id != rte_lcore_id())) { 970 fwd_lcores[i]->stopped = 0; 971 diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 972 fwd_lcores[i], lc_id); 973 if (diag != 0) 974 printf("launch lcore %u failed - diag=%d\n", 975 lc_id, diag); 976 } 977 } 978 } 979 980 /* 981 * Launch packet forwarding configuration. 982 */ 983 void 984 start_packet_forwarding(int with_tx_first) 985 { 986 port_fwd_begin_t port_fwd_begin; 987 port_fwd_end_t port_fwd_end; 988 struct rte_port *port; 989 unsigned int i; 990 portid_t pt_id; 991 streamid_t sm_id; 992 993 if (all_ports_started() == 0) { 994 printf("Not all ports were started\n"); 995 return; 996 } 997 if (test_done == 0) { 998 printf("Packet forwarding already started\n"); 999 return; 1000 } 1001 if(dcb_test) { 1002 for (i = 0; i < nb_fwd_ports; i++) { 1003 pt_id = fwd_ports_ids[i]; 1004 port = &ports[pt_id]; 1005 if (!port->dcb_flag) { 1006 printf("In DCB mode, all forwarding ports must " 1007 "be configured in this mode.\n"); 1008 return; 1009 } 1010 } 1011 if (nb_fwd_lcores == 1) { 1012 printf("In DCB mode,the nb forwarding cores " 1013 "should be larger than 1.\n"); 1014 return; 1015 } 1016 } 1017 test_done = 0; 1018 1019 if(!no_flush_rx) 1020 flush_fwd_rx_queues(); 1021 1022 fwd_config_setup(); 1023 rxtx_config_display(); 1024 1025 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1026 pt_id = fwd_ports_ids[i]; 1027 port = &ports[pt_id]; 1028 rte_eth_stats_get(pt_id, &port->stats); 1029 port->tx_dropped = 0; 1030 1031 map_port_queue_stats_mapping_registers(pt_id, port); 1032 } 1033 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1034 fwd_streams[sm_id]->rx_packets = 0; 1035 fwd_streams[sm_id]->tx_packets = 0; 1036 fwd_streams[sm_id]->fwd_dropped = 0; 1037 fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1038 fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1039 1040 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1041 memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1042 sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1043 memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1044 sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1045 #endif 1046 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1047 fwd_streams[sm_id]->core_cycles = 0; 1048 #endif 1049 } 1050 if (with_tx_first) { 1051 port_fwd_begin = tx_only_engine.port_fwd_begin; 1052 if (port_fwd_begin != NULL) { 1053 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1054 (*port_fwd_begin)(fwd_ports_ids[i]); 1055 } 1056 launch_packet_forwarding(run_one_txonly_burst_on_core); 1057 rte_eal_mp_wait_lcore(); 1058 port_fwd_end = tx_only_engine.port_fwd_end; 1059 if (port_fwd_end != NULL) { 1060 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1061 (*port_fwd_end)(fwd_ports_ids[i]); 1062 } 1063 } 1064 launch_packet_forwarding(start_pkt_forward_on_core); 1065 } 1066 1067 void 1068 stop_packet_forwarding(void) 1069 { 1070 struct rte_eth_stats stats; 1071 struct rte_port *port; 1072 port_fwd_end_t port_fwd_end; 1073 int i; 1074 portid_t pt_id; 1075 streamid_t sm_id; 1076 lcoreid_t lc_id; 1077 uint64_t total_recv; 1078 uint64_t total_xmit; 1079 uint64_t total_rx_dropped; 1080 uint64_t total_tx_dropped; 1081 uint64_t total_rx_nombuf; 1082 uint64_t tx_dropped; 1083 uint64_t rx_bad_ip_csum; 1084 uint64_t rx_bad_l4_csum; 1085 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1086 uint64_t fwd_cycles; 1087 #endif 1088 static const char *acc_stats_border = "+++++++++++++++"; 1089 1090 if (all_ports_started() == 0) { 1091 printf("Not all ports were started\n"); 1092 return; 1093 } 1094 if (test_done) { 1095 printf("Packet forwarding not started\n"); 1096 return; 1097 } 1098 printf("Telling cores to stop..."); 1099 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1100 fwd_lcores[lc_id]->stopped = 1; 1101 printf("\nWaiting for lcores to finish...\n"); 1102 rte_eal_mp_wait_lcore(); 1103 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1104 if (port_fwd_end != NULL) { 1105 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1106 pt_id = fwd_ports_ids[i]; 1107 (*port_fwd_end)(pt_id); 1108 } 1109 } 1110 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1111 fwd_cycles = 0; 1112 #endif 1113 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1114 if (cur_fwd_config.nb_fwd_streams > 1115 cur_fwd_config.nb_fwd_ports) { 1116 fwd_stream_stats_display(sm_id); 1117 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1118 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1119 } else { 1120 ports[fwd_streams[sm_id]->tx_port].tx_stream = 1121 fwd_streams[sm_id]; 1122 ports[fwd_streams[sm_id]->rx_port].rx_stream = 1123 fwd_streams[sm_id]; 1124 } 1125 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1126 tx_dropped = (uint64_t) (tx_dropped + 1127 fwd_streams[sm_id]->fwd_dropped); 1128 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1129 1130 rx_bad_ip_csum = 1131 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1132 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1133 fwd_streams[sm_id]->rx_bad_ip_csum); 1134 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1135 rx_bad_ip_csum; 1136 1137 rx_bad_l4_csum = 1138 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1139 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1140 fwd_streams[sm_id]->rx_bad_l4_csum); 1141 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1142 rx_bad_l4_csum; 1143 1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1145 fwd_cycles = (uint64_t) (fwd_cycles + 1146 fwd_streams[sm_id]->core_cycles); 1147 #endif 1148 } 1149 total_recv = 0; 1150 total_xmit = 0; 1151 total_rx_dropped = 0; 1152 total_tx_dropped = 0; 1153 total_rx_nombuf = 0; 1154 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1155 pt_id = fwd_ports_ids[i]; 1156 1157 port = &ports[pt_id]; 1158 rte_eth_stats_get(pt_id, &stats); 1159 stats.ipackets -= port->stats.ipackets; 1160 port->stats.ipackets = 0; 1161 stats.opackets -= port->stats.opackets; 1162 port->stats.opackets = 0; 1163 stats.ibytes -= port->stats.ibytes; 1164 port->stats.ibytes = 0; 1165 stats.obytes -= port->stats.obytes; 1166 port->stats.obytes = 0; 1167 stats.ierrors -= port->stats.ierrors; 1168 port->stats.ierrors = 0; 1169 stats.oerrors -= port->stats.oerrors; 1170 port->stats.oerrors = 0; 1171 stats.rx_nombuf -= port->stats.rx_nombuf; 1172 port->stats.rx_nombuf = 0; 1173 stats.fdirmatch -= port->stats.fdirmatch; 1174 port->stats.rx_nombuf = 0; 1175 stats.fdirmiss -= port->stats.fdirmiss; 1176 port->stats.rx_nombuf = 0; 1177 1178 total_recv += stats.ipackets; 1179 total_xmit += stats.opackets; 1180 total_rx_dropped += stats.ierrors; 1181 total_tx_dropped += port->tx_dropped; 1182 total_rx_nombuf += stats.rx_nombuf; 1183 1184 fwd_port_stats_display(pt_id, &stats); 1185 } 1186 printf("\n %s Accumulated forward statistics for all ports" 1187 "%s\n", 1188 acc_stats_border, acc_stats_border); 1189 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1190 "%-"PRIu64"\n" 1191 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1192 "%-"PRIu64"\n", 1193 total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1194 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1195 if (total_rx_nombuf > 0) 1196 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1197 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1198 "%s\n", 1199 acc_stats_border, acc_stats_border); 1200 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1201 if (total_recv > 0) 1202 printf("\n CPU cycles/packet=%u (total cycles=" 1203 "%"PRIu64" / total RX packets=%"PRIu64")\n", 1204 (unsigned int)(fwd_cycles / total_recv), 1205 fwd_cycles, total_recv); 1206 #endif 1207 printf("\nDone.\n"); 1208 test_done = 1; 1209 } 1210 1211 void 1212 dev_set_link_up(portid_t pid) 1213 { 1214 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1215 printf("\nSet link up fail.\n"); 1216 } 1217 1218 void 1219 dev_set_link_down(portid_t pid) 1220 { 1221 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1222 printf("\nSet link down fail.\n"); 1223 } 1224 1225 static int 1226 all_ports_started(void) 1227 { 1228 portid_t pi; 1229 struct rte_port *port; 1230 1231 for (pi = 0; pi < nb_ports; pi++) { 1232 port = &ports[pi]; 1233 /* Check if there is a port which is not started */ 1234 if (port->port_status != RTE_PORT_STARTED) 1235 return 0; 1236 } 1237 1238 /* No port is not started */ 1239 return 1; 1240 } 1241 1242 int 1243 start_port(portid_t pid) 1244 { 1245 int diag, need_check_link_status = 0; 1246 portid_t pi; 1247 queueid_t qi; 1248 struct rte_port *port; 1249 uint8_t *mac_addr; 1250 1251 if (test_done == 0) { 1252 printf("Please stop forwarding first\n"); 1253 return -1; 1254 } 1255 1256 if (init_fwd_streams() < 0) { 1257 printf("Fail from init_fwd_streams()\n"); 1258 return -1; 1259 } 1260 1261 if(dcb_config) 1262 dcb_test = 1; 1263 for (pi = 0; pi < nb_ports; pi++) { 1264 if (pid < nb_ports && pid != pi) 1265 continue; 1266 1267 port = &ports[pi]; 1268 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1269 RTE_PORT_HANDLING) == 0) { 1270 printf("Port %d is now not stopped\n", pi); 1271 continue; 1272 } 1273 1274 if (port->need_reconfig > 0) { 1275 port->need_reconfig = 0; 1276 1277 printf("Configuring Port %d (socket %u)\n", pi, 1278 port->socket_id); 1279 /* configure port */ 1280 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1281 &(port->dev_conf)); 1282 if (diag != 0) { 1283 if (rte_atomic16_cmpset(&(port->port_status), 1284 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1285 printf("Port %d can not be set back " 1286 "to stopped\n", pi); 1287 printf("Fail to configure port %d\n", pi); 1288 /* try to reconfigure port next time */ 1289 port->need_reconfig = 1; 1290 return -1; 1291 } 1292 } 1293 if (port->need_reconfig_queues > 0) { 1294 port->need_reconfig_queues = 0; 1295 /* setup tx queues */ 1296 for (qi = 0; qi < nb_txq; qi++) { 1297 if ((numa_support) && 1298 (txring_numa[pi] != NUMA_NO_CONFIG)) 1299 diag = rte_eth_tx_queue_setup(pi, qi, 1300 nb_txd,txring_numa[pi], 1301 &(port->tx_conf)); 1302 else 1303 diag = rte_eth_tx_queue_setup(pi, qi, 1304 nb_txd,port->socket_id, 1305 &(port->tx_conf)); 1306 1307 if (diag == 0) 1308 continue; 1309 1310 /* Fail to setup tx queue, return */ 1311 if (rte_atomic16_cmpset(&(port->port_status), 1312 RTE_PORT_HANDLING, 1313 RTE_PORT_STOPPED) == 0) 1314 printf("Port %d can not be set back " 1315 "to stopped\n", pi); 1316 printf("Fail to configure port %d tx queues\n", pi); 1317 /* try to reconfigure queues next time */ 1318 port->need_reconfig_queues = 1; 1319 return -1; 1320 } 1321 /* setup rx queues */ 1322 for (qi = 0; qi < nb_rxq; qi++) { 1323 if ((numa_support) && 1324 (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1325 struct rte_mempool * mp = 1326 mbuf_pool_find(rxring_numa[pi]); 1327 if (mp == NULL) { 1328 printf("Failed to setup RX queue:" 1329 "No mempool allocation" 1330 "on the socket %d\n", 1331 rxring_numa[pi]); 1332 return -1; 1333 } 1334 1335 diag = rte_eth_rx_queue_setup(pi, qi, 1336 nb_rxd,rxring_numa[pi], 1337 &(port->rx_conf),mp); 1338 } 1339 else 1340 diag = rte_eth_rx_queue_setup(pi, qi, 1341 nb_rxd,port->socket_id, 1342 &(port->rx_conf), 1343 mbuf_pool_find(port->socket_id)); 1344 1345 if (diag == 0) 1346 continue; 1347 1348 1349 /* Fail to setup rx queue, return */ 1350 if (rte_atomic16_cmpset(&(port->port_status), 1351 RTE_PORT_HANDLING, 1352 RTE_PORT_STOPPED) == 0) 1353 printf("Port %d can not be set back " 1354 "to stopped\n", pi); 1355 printf("Fail to configure port %d rx queues\n", pi); 1356 /* try to reconfigure queues next time */ 1357 port->need_reconfig_queues = 1; 1358 return -1; 1359 } 1360 } 1361 /* start port */ 1362 if (rte_eth_dev_start(pi) < 0) { 1363 printf("Fail to start port %d\n", pi); 1364 1365 /* Fail to setup rx queue, return */ 1366 if (rte_atomic16_cmpset(&(port->port_status), 1367 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1368 printf("Port %d can not be set back to " 1369 "stopped\n", pi); 1370 continue; 1371 } 1372 1373 if (rte_atomic16_cmpset(&(port->port_status), 1374 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1375 printf("Port %d can not be set into started\n", pi); 1376 1377 mac_addr = port->eth_addr.addr_bytes; 1378 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 1379 mac_addr[0], mac_addr[1], mac_addr[2], 1380 mac_addr[3], mac_addr[4], mac_addr[5]); 1381 1382 /* at least one port started, need checking link status */ 1383 need_check_link_status = 1; 1384 } 1385 1386 if (need_check_link_status && !no_link_check) 1387 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1388 else 1389 printf("Please stop the ports first\n"); 1390 1391 printf("Done\n"); 1392 return 0; 1393 } 1394 1395 void 1396 stop_port(portid_t pid) 1397 { 1398 portid_t pi; 1399 struct rte_port *port; 1400 int need_check_link_status = 0; 1401 1402 if (test_done == 0) { 1403 printf("Please stop forwarding first\n"); 1404 return; 1405 } 1406 if (dcb_test) { 1407 dcb_test = 0; 1408 dcb_config = 0; 1409 } 1410 printf("Stopping ports...\n"); 1411 1412 for (pi = 0; pi < nb_ports; pi++) { 1413 if (pid < nb_ports && pid != pi) 1414 continue; 1415 1416 port = &ports[pi]; 1417 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1418 RTE_PORT_HANDLING) == 0) 1419 continue; 1420 1421 rte_eth_dev_stop(pi); 1422 1423 if (rte_atomic16_cmpset(&(port->port_status), 1424 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1425 printf("Port %d can not be set into stopped\n", pi); 1426 need_check_link_status = 1; 1427 } 1428 if (need_check_link_status && !no_link_check) 1429 check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1430 1431 printf("Done\n"); 1432 } 1433 1434 void 1435 close_port(portid_t pid) 1436 { 1437 portid_t pi; 1438 struct rte_port *port; 1439 1440 if (test_done == 0) { 1441 printf("Please stop forwarding first\n"); 1442 return; 1443 } 1444 1445 printf("Closing ports...\n"); 1446 1447 for (pi = 0; pi < nb_ports; pi++) { 1448 if (pid < nb_ports && pid != pi) 1449 continue; 1450 1451 port = &ports[pi]; 1452 if (rte_atomic16_cmpset(&(port->port_status), 1453 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1454 printf("Port %d is now not stopped\n", pi); 1455 continue; 1456 } 1457 1458 rte_eth_dev_close(pi); 1459 1460 if (rte_atomic16_cmpset(&(port->port_status), 1461 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1462 printf("Port %d can not be set into stopped\n", pi); 1463 } 1464 1465 printf("Done\n"); 1466 } 1467 1468 int 1469 all_ports_stopped(void) 1470 { 1471 portid_t pi; 1472 struct rte_port *port; 1473 1474 for (pi = 0; pi < nb_ports; pi++) { 1475 port = &ports[pi]; 1476 if (port->port_status != RTE_PORT_STOPPED) 1477 return 0; 1478 } 1479 1480 return 1; 1481 } 1482 1483 void 1484 pmd_test_exit(void) 1485 { 1486 portid_t pt_id; 1487 1488 for (pt_id = 0; pt_id < nb_ports; pt_id++) { 1489 printf("Stopping port %d...", pt_id); 1490 fflush(stdout); 1491 rte_eth_dev_close(pt_id); 1492 printf("done\n"); 1493 } 1494 printf("bye...\n"); 1495 } 1496 1497 typedef void (*cmd_func_t)(void); 1498 struct pmd_test_command { 1499 const char *cmd_name; 1500 cmd_func_t cmd_func; 1501 }; 1502 1503 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1504 1505 /* Check the link status of all ports in up to 9s, and print them finally */ 1506 static void 1507 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 1508 { 1509 #define CHECK_INTERVAL 100 /* 100ms */ 1510 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1511 uint8_t portid, count, all_ports_up, print_flag = 0; 1512 struct rte_eth_link link; 1513 1514 printf("Checking link statuses...\n"); 1515 fflush(stdout); 1516 for (count = 0; count <= MAX_CHECK_TIME; count++) { 1517 all_ports_up = 1; 1518 for (portid = 0; portid < port_num; portid++) { 1519 if ((port_mask & (1 << portid)) == 0) 1520 continue; 1521 memset(&link, 0, sizeof(link)); 1522 rte_eth_link_get_nowait(portid, &link); 1523 /* print link status if flag set */ 1524 if (print_flag == 1) { 1525 if (link.link_status) 1526 printf("Port %d Link Up - speed %u " 1527 "Mbps - %s\n", (uint8_t)portid, 1528 (unsigned)link.link_speed, 1529 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1530 ("full-duplex") : ("half-duplex\n")); 1531 else 1532 printf("Port %d Link Down\n", 1533 (uint8_t)portid); 1534 continue; 1535 } 1536 /* clear all_ports_up flag if any link down */ 1537 if (link.link_status == 0) { 1538 all_ports_up = 0; 1539 break; 1540 } 1541 } 1542 /* after finally printing all link status, get out */ 1543 if (print_flag == 1) 1544 break; 1545 1546 if (all_ports_up == 0) { 1547 fflush(stdout); 1548 rte_delay_ms(CHECK_INTERVAL); 1549 } 1550 1551 /* set the print_flag if all ports up or timeout */ 1552 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1553 print_flag = 1; 1554 } 1555 } 1556 } 1557 1558 static int 1559 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1560 { 1561 uint16_t i; 1562 int diag; 1563 uint8_t mapping_found = 0; 1564 1565 for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1566 if ((tx_queue_stats_mappings[i].port_id == port_id) && 1567 (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1568 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1569 tx_queue_stats_mappings[i].queue_id, 1570 tx_queue_stats_mappings[i].stats_counter_id); 1571 if (diag != 0) 1572 return diag; 1573 mapping_found = 1; 1574 } 1575 } 1576 if (mapping_found) 1577 port->tx_queue_stats_mapping_enabled = 1; 1578 return 0; 1579 } 1580 1581 static int 1582 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1583 { 1584 uint16_t i; 1585 int diag; 1586 uint8_t mapping_found = 0; 1587 1588 for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1589 if ((rx_queue_stats_mappings[i].port_id == port_id) && 1590 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1591 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1592 rx_queue_stats_mappings[i].queue_id, 1593 rx_queue_stats_mappings[i].stats_counter_id); 1594 if (diag != 0) 1595 return diag; 1596 mapping_found = 1; 1597 } 1598 } 1599 if (mapping_found) 1600 port->rx_queue_stats_mapping_enabled = 1; 1601 return 0; 1602 } 1603 1604 static void 1605 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1606 { 1607 int diag = 0; 1608 1609 diag = set_tx_queue_stats_mapping_registers(pi, port); 1610 if (diag != 0) { 1611 if (diag == -ENOTSUP) { 1612 port->tx_queue_stats_mapping_enabled = 0; 1613 printf("TX queue stats mapping not supported port id=%d\n", pi); 1614 } 1615 else 1616 rte_exit(EXIT_FAILURE, 1617 "set_tx_queue_stats_mapping_registers " 1618 "failed for port id=%d diag=%d\n", 1619 pi, diag); 1620 } 1621 1622 diag = set_rx_queue_stats_mapping_registers(pi, port); 1623 if (diag != 0) { 1624 if (diag == -ENOTSUP) { 1625 port->rx_queue_stats_mapping_enabled = 0; 1626 printf("RX queue stats mapping not supported port id=%d\n", pi); 1627 } 1628 else 1629 rte_exit(EXIT_FAILURE, 1630 "set_rx_queue_stats_mapping_registers " 1631 "failed for port id=%d diag=%d\n", 1632 pi, diag); 1633 } 1634 } 1635 1636 void 1637 init_port_config(void) 1638 { 1639 portid_t pid; 1640 struct rte_port *port; 1641 1642 for (pid = 0; pid < nb_ports; pid++) { 1643 port = &ports[pid]; 1644 port->dev_conf.rxmode = rx_mode; 1645 port->dev_conf.fdir_conf = fdir_conf; 1646 if (nb_rxq > 1) { 1647 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1648 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1649 } else { 1650 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1651 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1652 } 1653 1654 /* In SR-IOV mode, RSS mode is not available */ 1655 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 1656 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1657 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 1658 else 1659 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 1660 } 1661 1662 port->rx_conf.rx_thresh = rx_thresh; 1663 port->rx_conf.rx_free_thresh = rx_free_thresh; 1664 port->rx_conf.rx_drop_en = rx_drop_en; 1665 port->tx_conf.tx_thresh = tx_thresh; 1666 port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1667 port->tx_conf.tx_free_thresh = tx_free_thresh; 1668 port->tx_conf.txq_flags = txq_flags; 1669 1670 rte_eth_macaddr_get(pid, &port->eth_addr); 1671 1672 map_port_queue_stats_mapping_registers(pid, port); 1673 #ifdef RTE_NIC_BYPASS 1674 rte_eth_dev_bypass_init(pid); 1675 #endif 1676 } 1677 } 1678 1679 const uint16_t vlan_tags[] = { 1680 0, 1, 2, 3, 4, 5, 6, 7, 1681 8, 9, 10, 11, 12, 13, 14, 15, 1682 16, 17, 18, 19, 20, 21, 22, 23, 1683 24, 25, 26, 27, 28, 29, 30, 31 1684 }; 1685 1686 static int 1687 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1688 { 1689 uint8_t i; 1690 1691 /* 1692 * Builds up the correct configuration for dcb+vt based on the vlan tags array 1693 * given above, and the number of traffic classes available for use. 1694 */ 1695 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1696 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1697 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1698 1699 /* VMDQ+DCB RX and TX configrations */ 1700 vmdq_rx_conf.enable_default_pool = 0; 1701 vmdq_rx_conf.default_pool = 0; 1702 vmdq_rx_conf.nb_queue_pools = 1703 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1704 vmdq_tx_conf.nb_queue_pools = 1705 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1706 1707 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1708 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1709 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1710 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1711 } 1712 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1713 vmdq_rx_conf.dcb_queue[i] = i; 1714 vmdq_tx_conf.dcb_queue[i] = i; 1715 } 1716 1717 /*set DCB mode of RX and TX of multiple queues*/ 1718 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 1719 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1720 if (dcb_conf->pfc_en) 1721 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1722 else 1723 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1724 1725 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1726 sizeof(struct rte_eth_vmdq_dcb_conf))); 1727 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1728 sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1729 } 1730 else { 1731 struct rte_eth_dcb_rx_conf rx_conf; 1732 struct rte_eth_dcb_tx_conf tx_conf; 1733 1734 /* queue mapping configuration of DCB RX and TX */ 1735 if (dcb_conf->num_tcs == ETH_4_TCS) 1736 dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1737 else 1738 dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1739 1740 rx_conf.nb_tcs = dcb_conf->num_tcs; 1741 tx_conf.nb_tcs = dcb_conf->num_tcs; 1742 1743 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1744 rx_conf.dcb_queue[i] = i; 1745 tx_conf.dcb_queue[i] = i; 1746 } 1747 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 1748 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1749 if (dcb_conf->pfc_en) 1750 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1751 else 1752 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1753 1754 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1755 sizeof(struct rte_eth_dcb_rx_conf))); 1756 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1757 sizeof(struct rte_eth_dcb_tx_conf))); 1758 } 1759 1760 return 0; 1761 } 1762 1763 int 1764 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1765 { 1766 struct rte_eth_conf port_conf; 1767 struct rte_port *rte_port; 1768 int retval; 1769 uint16_t nb_vlan; 1770 uint16_t i; 1771 1772 /* rxq and txq configuration in dcb mode */ 1773 nb_rxq = 128; 1774 nb_txq = 128; 1775 rx_free_thresh = 64; 1776 1777 memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1778 /* Enter DCB configuration status */ 1779 dcb_config = 1; 1780 1781 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1782 /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1783 retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1784 if (retval < 0) 1785 return retval; 1786 1787 rte_port = &ports[pid]; 1788 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1789 1790 rte_port->rx_conf.rx_thresh = rx_thresh; 1791 rte_port->rx_conf.rx_free_thresh = rx_free_thresh; 1792 rte_port->tx_conf.tx_thresh = tx_thresh; 1793 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1794 rte_port->tx_conf.tx_free_thresh = tx_free_thresh; 1795 /* VLAN filter */ 1796 rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1797 for (i = 0; i < nb_vlan; i++){ 1798 rx_vft_set(pid, vlan_tags[i], 1); 1799 } 1800 1801 rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1802 map_port_queue_stats_mapping_registers(pid, rte_port); 1803 1804 rte_port->dcb_flag = 1; 1805 1806 return 0; 1807 } 1808 1809 #ifdef RTE_EXEC_ENV_BAREMETAL 1810 #define main _main 1811 #endif 1812 1813 int 1814 main(int argc, char** argv) 1815 { 1816 int diag; 1817 uint8_t port_id; 1818 1819 diag = rte_eal_init(argc, argv); 1820 if (diag < 0) 1821 rte_panic("Cannot init EAL\n"); 1822 1823 if (rte_eal_pci_probe()) 1824 rte_panic("Cannot probe PCI\n"); 1825 1826 nb_ports = (portid_t) rte_eth_dev_count(); 1827 if (nb_ports == 0) 1828 rte_exit(EXIT_FAILURE, "No probed ethernet devices - " 1829 "check that " 1830 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that " 1831 "CONFIG_RTE_LIBRTE_EM_PMD=y and that " 1832 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your " 1833 "configuration file\n"); 1834 1835 set_def_fwd_config(); 1836 if (nb_lcores == 0) 1837 rte_panic("Empty set of forwarding logical cores - check the " 1838 "core mask supplied in the command parameters\n"); 1839 1840 argc -= diag; 1841 argv += diag; 1842 if (argc > 1) 1843 launch_args_parse(argc, argv); 1844 1845 if (nb_rxq > nb_txq) 1846 printf("Warning: nb_rxq=%d enables RSS configuration, " 1847 "but nb_txq=%d will prevent to fully test it.\n", 1848 nb_rxq, nb_txq); 1849 1850 init_config(); 1851 if (start_port(RTE_PORT_ALL) != 0) 1852 rte_exit(EXIT_FAILURE, "Start ports failed\n"); 1853 1854 /* set all ports to promiscuous mode by default */ 1855 for (port_id = 0; port_id < nb_ports; port_id++) 1856 rte_eth_promiscuous_enable(port_id); 1857 1858 #ifdef RTE_LIBRTE_CMDLINE 1859 if (interactive == 1) { 1860 if (auto_start) { 1861 printf("Start automatic packet forwarding\n"); 1862 start_packet_forwarding(0); 1863 } 1864 prompt(); 1865 } else 1866 #endif 1867 { 1868 char c; 1869 int rc; 1870 1871 printf("No commandline core given, start packet forwarding\n"); 1872 start_packet_forwarding(0); 1873 printf("Press enter to exit\n"); 1874 rc = read(0, &c, 1); 1875 if (rc < 0) 1876 return 1; 1877 } 1878 1879 return 0; 1880 } 1881