/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not; 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (! rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->type         = RTE_MBUF_PKT;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}

static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = (struct rte_pktmbuf_pool_private *)
			((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
			 "failed\n", socket_id);
	}
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}
	if (numa_support) {
		nb_mbuf_per_pool /= 2;
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
	} else {
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
			 "failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	init_port_config();

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = (pid < (nb_ports >> 1)) ? 0 : 1;
		else
			port->socket_id = 0;
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	}
	else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_all_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < nb_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				do {
					nb_rx = rte_eth_rx_burst(rxp, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if ((dcb_test) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode, the number of forwarding cores should be larger than 1.\n");
		return;
	}
	test_done = 0;
	flush_all_rx_queues();
	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.rx_nombuf = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* No port is not started */
	return 1;
}

void
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d\n", pi);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return;
			}
		}

		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;

			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
					port->socket_id, &(port->tx_conf));
				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd,
					port->socket_id, &(port->rx_conf),
					mbuf_pool_find(port->socket_id));
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
						"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
	}

	printf("Done\n");
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link is down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports are up or timeout expired */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 0) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
	}
}

const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan tags
	 * array given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_DCB_RX;
		eth_conf->txmode.mq_mode = ETH_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			"check that "
			"CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			"CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			"CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			"configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			"core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
			"but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();
	start_port(RTE_PORT_ALL);

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		printf("No interactive command line requested, starting packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}