1af75078fSIntel /*- 2af75078fSIntel * BSD LICENSE 3af75078fSIntel * 4e9d48c00SBruce Richardson * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 5af75078fSIntel * All rights reserved. 6af75078fSIntel * 7af75078fSIntel * Redistribution and use in source and binary forms, with or without 8af75078fSIntel * modification, are permitted provided that the following conditions 9af75078fSIntel * are met: 10af75078fSIntel * 11af75078fSIntel * * Redistributions of source code must retain the above copyright 12af75078fSIntel * notice, this list of conditions and the following disclaimer. 13af75078fSIntel * * Redistributions in binary form must reproduce the above copyright 14af75078fSIntel * notice, this list of conditions and the following disclaimer in 15af75078fSIntel * the documentation and/or other materials provided with the 16af75078fSIntel * distribution. 17af75078fSIntel * * Neither the name of Intel Corporation nor the names of its 18af75078fSIntel * contributors may be used to endorse or promote products derived 19af75078fSIntel * from this software without specific prior written permission. 20af75078fSIntel * 21af75078fSIntel * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22af75078fSIntel * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23af75078fSIntel * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24af75078fSIntel * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25af75078fSIntel * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26af75078fSIntel * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27af75078fSIntel * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28af75078fSIntel * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29af75078fSIntel * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30af75078fSIntel * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31af75078fSIntel * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32af75078fSIntel */ 33af75078fSIntel 34af75078fSIntel #include <stdarg.h> 35af75078fSIntel #include <stdio.h> 36af75078fSIntel #include <stdlib.h> 37af75078fSIntel #include <signal.h> 38af75078fSIntel #include <string.h> 39af75078fSIntel #include <time.h> 40af75078fSIntel #include <fcntl.h> 41af75078fSIntel #include <sys/types.h> 42af75078fSIntel #include <errno.h> 43af75078fSIntel 44af75078fSIntel #include <sys/queue.h> 45af75078fSIntel #include <sys/stat.h> 46af75078fSIntel 47af75078fSIntel #include <stdint.h> 48af75078fSIntel #include <unistd.h> 49af75078fSIntel #include <inttypes.h> 50af75078fSIntel 51af75078fSIntel #include <rte_common.h> 52af75078fSIntel #include <rte_byteorder.h> 53af75078fSIntel #include <rte_log.h> 54af75078fSIntel #include <rte_debug.h> 55af75078fSIntel #include <rte_cycles.h> 56af75078fSIntel #include <rte_memory.h> 57af75078fSIntel #include <rte_memcpy.h> 58af75078fSIntel #include <rte_memzone.h> 59af75078fSIntel #include <rte_launch.h> 60af75078fSIntel #include <rte_tailq.h> 61af75078fSIntel #include <rte_eal.h> 62af75078fSIntel #include <rte_per_lcore.h> 63af75078fSIntel #include <rte_lcore.h> 64af75078fSIntel #include <rte_atomic.h> 65af75078fSIntel #include <rte_branch_prediction.h> 66af75078fSIntel #include <rte_ring.h> 67af75078fSIntel #include <rte_mempool.h> 68af75078fSIntel #include <rte_malloc.h> 69af75078fSIntel #include <rte_mbuf.h> 70af75078fSIntel #include <rte_interrupts.h> 71af75078fSIntel #include <rte_pci.h> 72af75078fSIntel #include <rte_ether.h> 73af75078fSIntel #include <rte_ethdev.h> 74af75078fSIntel #include <rte_string_fns.h> 75148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT 76148f963fSBruce Richardson #include <rte_eth_xenvirt.h> 77148f963fSBruce Richardson #endif 78af75078fSIntel 79af75078fSIntel #include "testpmd.h" 80148f963fSBruce Richardson #include "mempool_osdep.h" 81af75078fSIntel 82af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 83af75078fSIntel 84af75078fSIntel /* use master core for command line ? */ 85af75078fSIntel uint8_t interactive = 0; 86ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 87af75078fSIntel 88af75078fSIntel /* 89af75078fSIntel * NUMA support configuration. 90af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 91af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 92af75078fSIntel * probed ports among the CPU sockets 0 and 1. 93af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 94af75078fSIntel */ 95af75078fSIntel uint8_t numa_support = 0; /**< No numa support by default */ 96af75078fSIntel 97af75078fSIntel /* 98b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 99b6ea6408SIntel * not configured. 100b6ea6408SIntel */ 101b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 102b6ea6408SIntel 103b6ea6408SIntel /* 104148f963fSBruce Richardson * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 105148f963fSBruce Richardson */ 106148f963fSBruce Richardson uint8_t mp_anon = 0; 107148f963fSBruce Richardson 108148f963fSBruce Richardson /* 109af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 110af75078fSIntel * forwarded. 111af75078fSIntel * Must be instanciated with the ethernet addresses of peer traffic generator 112af75078fSIntel * ports. 113af75078fSIntel */ 114af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 115af75078fSIntel portid_t nb_peer_eth_addrs = 0; 116af75078fSIntel 117af75078fSIntel /* 118af75078fSIntel * Probed Target Environment. 119af75078fSIntel */ 120af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 121af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 122af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 123af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 124af75078fSIntel 125af75078fSIntel /* 126af75078fSIntel * Test Forwarding Configuration. 127af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 128af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 129af75078fSIntel */ 130af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 131af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 132af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 133af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 134af75078fSIntel 135af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 136af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 137af75078fSIntel 138af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 139af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 140af75078fSIntel 141af75078fSIntel /* 142af75078fSIntel * Forwarding engines. 143af75078fSIntel */ 144af75078fSIntel struct fwd_engine * fwd_engines[] = { 145af75078fSIntel &io_fwd_engine, 146af75078fSIntel &mac_fwd_engine, 14757e85242SBruce Richardson &mac_retry_fwd_engine, 148d47388f1SCyril Chemparathy &mac_swap_engine, 149e9e23a61SCyril Chemparathy &flow_gen_engine, 150af75078fSIntel &rx_only_engine, 151af75078fSIntel &tx_only_engine, 152af75078fSIntel &csum_fwd_engine, 153168dfa61SIvan Boule &icmp_echo_engine, 154af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 155af75078fSIntel &ieee1588_fwd_engine, 156af75078fSIntel #endif 157af75078fSIntel NULL, 158af75078fSIntel }; 159af75078fSIntel 160af75078fSIntel struct fwd_config cur_fwd_config; 161af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 162af75078fSIntel 163af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 164c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 165c8798818SIntel * specified on command-line. */ 166af75078fSIntel 167af75078fSIntel /* 168af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 169af75078fSIntel */ 170af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 171af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 172af75078fSIntel TXONLY_DEF_PACKET_LEN, 173af75078fSIntel }; 174af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 175af75078fSIntel 176af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 177e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 178af75078fSIntel 179900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 180900550deSIntel uint8_t dcb_config = 0; 181900550deSIntel 182900550deSIntel /* Whether the dcb is in testing status */ 183900550deSIntel uint8_t dcb_test = 0; 184900550deSIntel 185900550deSIntel /* DCB on and VT on mapping is default */ 186900550deSIntel enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING; 187af75078fSIntel 188af75078fSIntel /* 189af75078fSIntel * Configurable number of RX/TX queues. 190af75078fSIntel */ 191af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 192af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 193af75078fSIntel 194af75078fSIntel /* 195af75078fSIntel * Configurable number of RX/TX ring descriptors. 196af75078fSIntel */ 197af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128 198af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512 199af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 200af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 201af75078fSIntel 202af75078fSIntel /* 203af75078fSIntel * Configurable values of RX and TX ring threshold registers. 204af75078fSIntel */ 205af75078fSIntel #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ 206af75078fSIntel #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ 207e9378bbcSCunming Liang #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */ 208af75078fSIntel 209e9378bbcSCunming Liang #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */ 210af75078fSIntel #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ 211af75078fSIntel #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ 212af75078fSIntel 213af75078fSIntel struct rte_eth_thresh rx_thresh = { 214af75078fSIntel .pthresh = RX_PTHRESH, 215af75078fSIntel .hthresh = RX_HTHRESH, 216af75078fSIntel .wthresh = RX_WTHRESH, 217af75078fSIntel }; 218af75078fSIntel 219af75078fSIntel struct rte_eth_thresh tx_thresh = { 220af75078fSIntel .pthresh = TX_PTHRESH, 221af75078fSIntel .hthresh = TX_HTHRESH, 222af75078fSIntel .wthresh = TX_WTHRESH, 223af75078fSIntel }; 224af75078fSIntel 225af75078fSIntel /* 226af75078fSIntel * Configurable value of RX free threshold. 227af75078fSIntel */ 228*fa73fe80SBruce Richardson uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets, 229*fa73fe80SBruce Richardson This setting is needed for ixgbe to enable bulk alloc or vector 230*fa73fe80SBruce Richardson receive functionality. */ 231af75078fSIntel 232af75078fSIntel /* 233ce8d5614SIntel * Configurable value of RX drop enable. 234ce8d5614SIntel */ 235ce8d5614SIntel uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */ 236ce8d5614SIntel 237ce8d5614SIntel /* 238af75078fSIntel * Configurable value of TX free threshold. 239af75078fSIntel */ 240af75078fSIntel uint16_t tx_free_thresh = 0; /* Use default values. */ 241af75078fSIntel 242af75078fSIntel /* 243af75078fSIntel * Configurable value of TX RS bit threshold. 244af75078fSIntel */ 245af75078fSIntel uint16_t tx_rs_thresh = 0; /* Use default values. */ 246af75078fSIntel 247af75078fSIntel /* 248ce8d5614SIntel * Configurable value of TX queue flags. 249ce8d5614SIntel */ 250ce8d5614SIntel uint32_t txq_flags = 0; /* No flags set. */ 251ce8d5614SIntel 252ce8d5614SIntel /* 253af75078fSIntel * Receive Side Scaling (RSS) configuration. 254af75078fSIntel */ 2558a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 256af75078fSIntel 257af75078fSIntel /* 258af75078fSIntel * Port topology configuration 259af75078fSIntel */ 260af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 261af75078fSIntel 2627741e4cfSIntel /* 2637741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 2647741e4cfSIntel */ 2657741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 2667741e4cfSIntel 267af75078fSIntel /* 268bc202406SDavid Marchand * Avoids to check link status when starting/stopping a port. 269bc202406SDavid Marchand */ 270bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 271bc202406SDavid Marchand 272bc202406SDavid Marchand /* 2737b7e5ba7SIntel * NIC bypass mode configuration options. 2747b7e5ba7SIntel */ 2757b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 2767b7e5ba7SIntel 2777b7e5ba7SIntel /* The NIC bypass watchdog timeout. */ 2787b7e5ba7SIntel uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 2797b7e5ba7SIntel 2807b7e5ba7SIntel #endif 2817b7e5ba7SIntel 2827b7e5ba7SIntel /* 283af75078fSIntel * Ethernet device configuration. 284af75078fSIntel */ 285af75078fSIntel struct rte_eth_rxmode rx_mode = { 286af75078fSIntel .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 287af75078fSIntel .split_hdr_size = 0, 288af75078fSIntel .header_split = 0, /**< Header Split disabled. */ 289af75078fSIntel .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 290af75078fSIntel .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 291a47aa8b9SIntel .hw_vlan_strip = 1, /**< VLAN strip enabled. */ 292a47aa8b9SIntel .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 293af75078fSIntel .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ 294af75078fSIntel .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ 295af75078fSIntel }; 296af75078fSIntel 297af75078fSIntel struct rte_fdir_conf fdir_conf = { 298af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 299af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 300af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 301af75078fSIntel .flexbytes_offset = 0x6, 302af75078fSIntel .drop_queue = 127, 303af75078fSIntel }; 304af75078fSIntel 3052950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 306af75078fSIntel 307ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 308ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 309ed30d9b6SIntel 310ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 311ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 312ed30d9b6SIntel 313ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 314ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 315ed30d9b6SIntel 316ed30d9b6SIntel /* Forward function declarations */ 317ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 318ce8d5614SIntel static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask); 319ce8d5614SIntel 320ce8d5614SIntel /* 321ce8d5614SIntel * Check if all the ports are started. 322ce8d5614SIntel * If yes, return positive value. If not, return zero. 323ce8d5614SIntel */ 324ce8d5614SIntel static int all_ports_started(void); 325ed30d9b6SIntel 326af75078fSIntel /* 327af75078fSIntel * Setup default configuration. 328af75078fSIntel */ 329af75078fSIntel static void 330af75078fSIntel set_default_fwd_lcores_config(void) 331af75078fSIntel { 332af75078fSIntel unsigned int i; 333af75078fSIntel unsigned int nb_lc; 334af75078fSIntel 335af75078fSIntel nb_lc = 0; 336af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 337af75078fSIntel if (! rte_lcore_is_enabled(i)) 338af75078fSIntel continue; 339af75078fSIntel if (i == rte_get_master_lcore()) 340af75078fSIntel continue; 341af75078fSIntel fwd_lcores_cpuids[nb_lc++] = i; 342af75078fSIntel } 343af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 344af75078fSIntel nb_cfg_lcores = nb_lcores; 345af75078fSIntel nb_fwd_lcores = 1; 346af75078fSIntel } 347af75078fSIntel 348af75078fSIntel static void 349af75078fSIntel set_def_peer_eth_addrs(void) 350af75078fSIntel { 351af75078fSIntel portid_t i; 352af75078fSIntel 353af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 354af75078fSIntel peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 355af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 356af75078fSIntel } 357af75078fSIntel } 358af75078fSIntel 359af75078fSIntel static void 360af75078fSIntel set_default_fwd_ports_config(void) 361af75078fSIntel { 362af75078fSIntel portid_t pt_id; 363af75078fSIntel 364af75078fSIntel for (pt_id = 0; pt_id < nb_ports; pt_id++) 365af75078fSIntel fwd_ports_ids[pt_id] = pt_id; 366af75078fSIntel 367af75078fSIntel nb_cfg_ports = nb_ports; 368af75078fSIntel nb_fwd_ports = nb_ports; 369af75078fSIntel } 370af75078fSIntel 371af75078fSIntel void 372af75078fSIntel set_def_fwd_config(void) 373af75078fSIntel { 374af75078fSIntel set_default_fwd_lcores_config(); 375af75078fSIntel set_def_peer_eth_addrs(); 376af75078fSIntel set_default_fwd_ports_config(); 377af75078fSIntel } 378af75078fSIntel 379af75078fSIntel /* 380af75078fSIntel * Configuration initialisation done once at init time. 381af75078fSIntel */ 382af75078fSIntel struct mbuf_ctor_arg { 383af75078fSIntel uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ 384af75078fSIntel uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 385af75078fSIntel }; 386af75078fSIntel 387af75078fSIntel struct mbuf_pool_ctor_arg { 388af75078fSIntel uint16_t seg_buf_size; /**< size of data segment in mbuf. */ 389af75078fSIntel }; 390af75078fSIntel 391af75078fSIntel static void 392af75078fSIntel testpmd_mbuf_ctor(struct rte_mempool *mp, 393af75078fSIntel void *opaque_arg, 394af75078fSIntel void *raw_mbuf, 395af75078fSIntel __attribute__((unused)) unsigned i) 396af75078fSIntel { 397af75078fSIntel struct mbuf_ctor_arg *mb_ctor_arg; 398af75078fSIntel struct rte_mbuf *mb; 399af75078fSIntel 400af75078fSIntel mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; 401af75078fSIntel mb = (struct rte_mbuf *) raw_mbuf; 402af75078fSIntel 403af75078fSIntel mb->pool = mp; 404af75078fSIntel mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); 405af75078fSIntel mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + 406af75078fSIntel mb_ctor_arg->seg_buf_offset); 407af75078fSIntel mb->buf_len = mb_ctor_arg->seg_buf_size; 408af75078fSIntel mb->ol_flags = 0; 40908b563ffSOlivier Matz mb->data_off = RTE_PKTMBUF_HEADROOM; 410ea672a8bSOlivier Matz mb->nb_segs = 1; 4117869536fSBruce Richardson mb->l2_l3_len = 0; 4127869536fSBruce Richardson mb->vlan_tci = 0; 413ea672a8bSOlivier Matz mb->hash.rss = 0; 414af75078fSIntel } 415af75078fSIntel 416af75078fSIntel static void 417af75078fSIntel testpmd_mbuf_pool_ctor(struct rte_mempool *mp, 418af75078fSIntel void *opaque_arg) 419af75078fSIntel { 420af75078fSIntel struct mbuf_pool_ctor_arg *mbp_ctor_arg; 421af75078fSIntel struct rte_pktmbuf_pool_private *mbp_priv; 422af75078fSIntel 423af75078fSIntel if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 424af75078fSIntel printf("%s(%s) private_data_size %d < %d\n", 425af75078fSIntel __func__, mp->name, (int) mp->private_data_size, 426af75078fSIntel (int) sizeof(struct rte_pktmbuf_pool_private)); 427af75078fSIntel return; 428af75078fSIntel } 429af75078fSIntel mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; 430148f963fSBruce Richardson mbp_priv = rte_mempool_get_priv(mp); 431af75078fSIntel mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; 432af75078fSIntel } 433af75078fSIntel 434af75078fSIntel static void 435af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 436af75078fSIntel unsigned int socket_id) 437af75078fSIntel { 438af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 439af75078fSIntel struct rte_mempool *rte_mp; 440af75078fSIntel struct mbuf_pool_ctor_arg mbp_ctor_arg; 441af75078fSIntel struct mbuf_ctor_arg mb_ctor_arg; 442af75078fSIntel uint32_t mb_size; 443af75078fSIntel 444af75078fSIntel mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + 445af75078fSIntel mbuf_seg_size); 446af75078fSIntel mb_ctor_arg.seg_buf_offset = 447af75078fSIntel (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); 448af75078fSIntel mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; 449af75078fSIntel mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; 450af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 451148f963fSBruce Richardson 452148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT 453148f963fSBruce Richardson rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 454af75078fSIntel (unsigned) mb_mempool_cache, 455af75078fSIntel sizeof(struct rte_pktmbuf_pool_private), 456af75078fSIntel testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 457af75078fSIntel testpmd_mbuf_ctor, &mb_ctor_arg, 458af75078fSIntel socket_id, 0); 459148f963fSBruce Richardson 460148f963fSBruce Richardson 461148f963fSBruce Richardson 462148f963fSBruce Richardson #else 463148f963fSBruce Richardson if (mp_anon != 0) 464148f963fSBruce Richardson rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size, 465148f963fSBruce Richardson (unsigned) mb_mempool_cache, 466148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 467148f963fSBruce Richardson testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 468148f963fSBruce Richardson testpmd_mbuf_ctor, &mb_ctor_arg, 469148f963fSBruce Richardson socket_id, 0); 470148f963fSBruce Richardson else 471148f963fSBruce Richardson rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size, 472148f963fSBruce Richardson (unsigned) mb_mempool_cache, 473148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 474148f963fSBruce Richardson testpmd_mbuf_pool_ctor, &mbp_ctor_arg, 475148f963fSBruce Richardson testpmd_mbuf_ctor, &mb_ctor_arg, 476148f963fSBruce Richardson socket_id, 0); 477148f963fSBruce Richardson 478148f963fSBruce Richardson #endif 479148f963fSBruce Richardson 480af75078fSIntel if (rte_mp == NULL) { 481ce8d5614SIntel rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u " 482ce8d5614SIntel "failed\n", socket_id); 483148f963fSBruce Richardson } else if (verbose_level > 0) { 484591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 485af75078fSIntel } 486af75078fSIntel } 487af75078fSIntel 48820a0286fSLiu Xiaofeng /* 48920a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 49020a0286fSLiu Xiaofeng * if valid, return 0, else return -1 49120a0286fSLiu Xiaofeng */ 49220a0286fSLiu Xiaofeng static int 49320a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 49420a0286fSLiu Xiaofeng { 49520a0286fSLiu Xiaofeng static int warning_once = 0; 49620a0286fSLiu Xiaofeng 49720a0286fSLiu Xiaofeng if (socket_id >= MAX_SOCKET) { 49820a0286fSLiu Xiaofeng if (!warning_once && numa_support) 49920a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 50020a0286fSLiu Xiaofeng " using --port-numa-config and" 50120a0286fSLiu Xiaofeng " --ring-numa-config parameters along with" 50220a0286fSLiu Xiaofeng " --numa.\n"); 50320a0286fSLiu Xiaofeng warning_once = 1; 50420a0286fSLiu Xiaofeng return -1; 50520a0286fSLiu Xiaofeng } 50620a0286fSLiu Xiaofeng return 0; 50720a0286fSLiu Xiaofeng } 50820a0286fSLiu Xiaofeng 509af75078fSIntel static void 510af75078fSIntel init_config(void) 511af75078fSIntel { 512ce8d5614SIntel portid_t pid; 513af75078fSIntel struct rte_port *port; 514af75078fSIntel struct rte_mempool *mbp; 515af75078fSIntel unsigned int nb_mbuf_per_pool; 516af75078fSIntel lcoreid_t lc_id; 517b6ea6408SIntel uint8_t port_per_socket[MAX_SOCKET]; 518af75078fSIntel 519b6ea6408SIntel memset(port_per_socket,0,MAX_SOCKET); 520af75078fSIntel /* Configuration of logical cores. */ 521af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 522af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 523af75078fSIntel CACHE_LINE_SIZE); 524af75078fSIntel if (fwd_lcores == NULL) { 525ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 526ce8d5614SIntel "failed\n", nb_lcores); 527af75078fSIntel } 528af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 529af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 530af75078fSIntel sizeof(struct fwd_lcore), 531af75078fSIntel CACHE_LINE_SIZE); 532af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 533ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 534ce8d5614SIntel "failed\n"); 535af75078fSIntel } 536af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 537af75078fSIntel } 538af75078fSIntel 539af75078fSIntel /* 540af75078fSIntel * Create pools of mbuf. 541af75078fSIntel * If NUMA support is disabled, create a single pool of mbuf in 542b6ea6408SIntel * socket 0 memory by default. 543af75078fSIntel * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 544c8798818SIntel * 545c8798818SIntel * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 546c8798818SIntel * nb_txd can be configured at run time. 547af75078fSIntel */ 548c8798818SIntel if (param_total_num_mbufs) 549c8798818SIntel nb_mbuf_per_pool = param_total_num_mbufs; 550c8798818SIntel else { 551c8798818SIntel nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 552c8798818SIntel + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 553b6ea6408SIntel 554b6ea6408SIntel if (!numa_support) 555c8798818SIntel nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); 556c8798818SIntel } 557af75078fSIntel 558b6ea6408SIntel if (!numa_support) { 559b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 560b6ea6408SIntel mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 561b6ea6408SIntel else 562b6ea6408SIntel mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 563b6ea6408SIntel socket_num); 564b6ea6408SIntel } 565af75078fSIntel 566af75078fSIntel /* Configuration of Ethernet ports. */ 567af75078fSIntel ports = rte_zmalloc("testpmd: ports", 568af75078fSIntel sizeof(struct rte_port) * nb_ports, 569af75078fSIntel CACHE_LINE_SIZE); 570af75078fSIntel if (ports == NULL) { 571ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) " 572ce8d5614SIntel "failed\n", nb_ports); 573af75078fSIntel } 574af75078fSIntel 575ce8d5614SIntel for (pid = 0; pid < nb_ports; pid++) { 576ce8d5614SIntel port = &ports[pid]; 577ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 578ce8d5614SIntel 579b6ea6408SIntel if (numa_support) { 580b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 581b6ea6408SIntel port_per_socket[port_numa[pid]]++; 582b6ea6408SIntel else { 583b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 58420a0286fSLiu Xiaofeng 58520a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 58620a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 58720a0286fSLiu Xiaofeng socket_id = 0; 588b6ea6408SIntel port_per_socket[socket_id]++; 589b6ea6408SIntel } 590b6ea6408SIntel } 591b6ea6408SIntel 592ce8d5614SIntel /* set flag to initialize port/queue */ 593ce8d5614SIntel port->need_reconfig = 1; 594ce8d5614SIntel port->need_reconfig_queues = 1; 595ce8d5614SIntel } 596ce8d5614SIntel 597b6ea6408SIntel if (numa_support) { 598b6ea6408SIntel uint8_t i; 599b6ea6408SIntel unsigned int nb_mbuf; 600ce8d5614SIntel 601b6ea6408SIntel if (param_total_num_mbufs) 602b6ea6408SIntel nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 603b6ea6408SIntel 604b6ea6408SIntel for (i = 0; i < MAX_SOCKET; i++) { 605b6ea6408SIntel nb_mbuf = (nb_mbuf_per_pool * 606b6ea6408SIntel port_per_socket[i]); 607b6ea6408SIntel if (nb_mbuf) 608b6ea6408SIntel mbuf_pool_create(mbuf_data_size, 609b6ea6408SIntel nb_mbuf,i); 610b6ea6408SIntel } 611b6ea6408SIntel } 612b6ea6408SIntel init_port_config(); 6135886ae07SAdrien Mazarguil 6145886ae07SAdrien Mazarguil /* 6155886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 6165886ae07SAdrien Mazarguil */ 6175886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 6188fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 6198fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 6208fd8bebcSAdrien Mazarguil 6215886ae07SAdrien Mazarguil if (mbp == NULL) 6225886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 6235886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 6245886ae07SAdrien Mazarguil } 6255886ae07SAdrien Mazarguil 626ce8d5614SIntel /* Configuration of packet forwarding streams. */ 627ce8d5614SIntel if (init_fwd_streams() < 0) 628ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 629ce8d5614SIntel } 630ce8d5614SIntel 6312950a769SDeclan Doherty 6322950a769SDeclan Doherty void 6332950a769SDeclan Doherty reconfig(portid_t new_port_id) 6342950a769SDeclan Doherty { 6352950a769SDeclan Doherty struct rte_port *port; 6362950a769SDeclan Doherty 6372950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 6382950a769SDeclan Doherty ports = rte_realloc(ports, 6392950a769SDeclan Doherty sizeof(struct rte_port) * nb_ports, 6402950a769SDeclan Doherty CACHE_LINE_SIZE); 6412950a769SDeclan Doherty if (ports == NULL) { 6422950a769SDeclan Doherty rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n", 6432950a769SDeclan Doherty nb_ports); 6442950a769SDeclan Doherty } 6452950a769SDeclan Doherty 6462950a769SDeclan Doherty port = &ports[new_port_id]; 6472950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 6482950a769SDeclan Doherty 6492950a769SDeclan Doherty /* set flag to initialize port/queue */ 6502950a769SDeclan Doherty port->need_reconfig = 1; 6512950a769SDeclan Doherty port->need_reconfig_queues = 1; 6522950a769SDeclan Doherty 6532950a769SDeclan Doherty init_port_config(); 6542950a769SDeclan Doherty } 6552950a769SDeclan Doherty 6562950a769SDeclan Doherty 657ce8d5614SIntel int 658ce8d5614SIntel init_fwd_streams(void) 659ce8d5614SIntel { 660ce8d5614SIntel portid_t pid; 661ce8d5614SIntel struct rte_port *port; 662ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 663ce8d5614SIntel 664ce8d5614SIntel /* set socket id according to numa or not */ 665ce8d5614SIntel for (pid = 0; pid < nb_ports; pid++) { 666ce8d5614SIntel port = &ports[pid]; 667ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 668ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 669ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 670ce8d5614SIntel port->dev_info.max_rx_queues); 671ce8d5614SIntel return -1; 672ce8d5614SIntel } 673ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 674ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 675ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 676ce8d5614SIntel port->dev_info.max_tx_queues); 677ce8d5614SIntel return -1; 678ce8d5614SIntel } 67920a0286fSLiu Xiaofeng if (numa_support) { 68020a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 68120a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 68220a0286fSLiu Xiaofeng else { 683b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 68420a0286fSLiu Xiaofeng 68520a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 68620a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 68720a0286fSLiu Xiaofeng port->socket_id = 0; 68820a0286fSLiu Xiaofeng } 68920a0286fSLiu Xiaofeng } 690b6ea6408SIntel else { 691b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 692af75078fSIntel port->socket_id = 0; 693b6ea6408SIntel else 694b6ea6408SIntel port->socket_id = socket_num; 695b6ea6408SIntel } 696af75078fSIntel } 697af75078fSIntel 698ce8d5614SIntel nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq); 699ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 700ce8d5614SIntel return 0; 701ce8d5614SIntel /* clear the old */ 702ce8d5614SIntel if (fwd_streams != NULL) { 703ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 704ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 705ce8d5614SIntel continue; 706ce8d5614SIntel rte_free(fwd_streams[sm_id]); 707ce8d5614SIntel fwd_streams[sm_id] = NULL; 708af75078fSIntel } 709ce8d5614SIntel rte_free(fwd_streams); 710ce8d5614SIntel fwd_streams = NULL; 711ce8d5614SIntel } 712ce8d5614SIntel 713ce8d5614SIntel /* init new */ 714ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 715ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 716ce8d5614SIntel sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE); 717ce8d5614SIntel if (fwd_streams == NULL) 718ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 719ce8d5614SIntel "failed\n", nb_fwd_streams); 720ce8d5614SIntel 721af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 722af75078fSIntel fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 723ce8d5614SIntel sizeof(struct fwd_stream), CACHE_LINE_SIZE); 724ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 725ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 726ce8d5614SIntel " failed\n"); 727af75078fSIntel } 728ce8d5614SIntel 729ce8d5614SIntel return 0; 730af75078fSIntel } 731af75078fSIntel 732af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 733af75078fSIntel static void 734af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 735af75078fSIntel { 736af75078fSIntel unsigned int total_burst; 737af75078fSIntel unsigned int nb_burst; 738af75078fSIntel unsigned int burst_stats[3]; 739af75078fSIntel uint16_t pktnb_stats[3]; 740af75078fSIntel uint16_t nb_pkt; 741af75078fSIntel int burst_percent[3]; 742af75078fSIntel 743af75078fSIntel /* 744af75078fSIntel * First compute the total number of packet bursts and the 745af75078fSIntel * two highest numbers of bursts of the same number of packets. 746af75078fSIntel */ 747af75078fSIntel total_burst = 0; 748af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 749af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 750af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 751af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 752af75078fSIntel if (nb_burst == 0) 753af75078fSIntel continue; 754af75078fSIntel total_burst += nb_burst; 755af75078fSIntel if (nb_burst > burst_stats[0]) { 756af75078fSIntel burst_stats[1] = burst_stats[0]; 757af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 758af75078fSIntel burst_stats[0] = nb_burst; 759af75078fSIntel pktnb_stats[0] = nb_pkt; 760af75078fSIntel } 761af75078fSIntel } 762af75078fSIntel if (total_burst == 0) 763af75078fSIntel return; 764af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 765af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 766af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 767af75078fSIntel if (burst_stats[0] == total_burst) { 768af75078fSIntel printf("]\n"); 769af75078fSIntel return; 770af75078fSIntel } 771af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 772af75078fSIntel printf(" + %d%% of %d pkts]\n", 773af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 774af75078fSIntel return; 775af75078fSIntel } 776af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 777af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 778af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 779af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 780af75078fSIntel return; 781af75078fSIntel } 782af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 783af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 784af75078fSIntel } 785af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 786af75078fSIntel 787af75078fSIntel static void 788af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 789af75078fSIntel { 790af75078fSIntel struct rte_port *port; 791013af9b6SIntel uint8_t i; 792af75078fSIntel 793af75078fSIntel static const char *fwd_stats_border = "----------------------"; 794af75078fSIntel 795af75078fSIntel port = &ports[port_id]; 796af75078fSIntel printf("\n %s Forward statistics for port %-2d %s\n", 797af75078fSIntel fwd_stats_border, port_id, fwd_stats_border); 798013af9b6SIntel 799013af9b6SIntel if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 800af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 801af75078fSIntel "%-"PRIu64"\n", 80270bdb186SIvan Boule stats->ipackets, stats->imissed, 80370bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 804af75078fSIntel 805af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) 806af75078fSIntel printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 807af75078fSIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 80870bdb186SIvan Boule if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 80970bdb186SIvan Boule printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64 81070bdb186SIvan Boule "RX-error: %-"PRIu64"\n", 81170bdb186SIvan Boule stats->ibadcrc, stats->ibadlen, stats->ierrors); 81270bdb186SIvan Boule printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 81370bdb186SIvan Boule } 814af75078fSIntel 815af75078fSIntel printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 816af75078fSIntel "%-"PRIu64"\n", 817af75078fSIntel stats->opackets, port->tx_dropped, 818af75078fSIntel (uint64_t) (stats->opackets + port->tx_dropped)); 819013af9b6SIntel } 820013af9b6SIntel else { 821013af9b6SIntel printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 822013af9b6SIntel "%14"PRIu64"\n", 82370bdb186SIvan Boule stats->ipackets, stats->imissed, 82470bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 825013af9b6SIntel 826013af9b6SIntel if (cur_fwd_eng == &csum_fwd_engine) 827013af9b6SIntel printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 828013af9b6SIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 82970bdb186SIvan Boule if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) { 83070bdb186SIvan Boule printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64 83170bdb186SIvan Boule " RX-error:%"PRIu64"\n", 83270bdb186SIvan Boule stats->ibadcrc, stats->ibadlen, stats->ierrors); 83370bdb186SIvan Boule printf(" RX-nombufs: %14"PRIu64"\n", 83470bdb186SIvan Boule stats->rx_nombuf); 83570bdb186SIvan Boule } 836013af9b6SIntel 837013af9b6SIntel printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 838013af9b6SIntel "%14"PRIu64"\n", 839013af9b6SIntel stats->opackets, port->tx_dropped, 840013af9b6SIntel (uint64_t) (stats->opackets + port->tx_dropped)); 841013af9b6SIntel } 842e659b6b4SIvan Boule 843e659b6b4SIvan Boule /* Display statistics of XON/XOFF pause frames, if any. */ 844e659b6b4SIvan Boule if ((stats->tx_pause_xon | stats->rx_pause_xon | 845e659b6b4SIvan Boule stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) { 846e659b6b4SIvan Boule printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n", 847e659b6b4SIvan Boule stats->rx_pause_xoff, stats->rx_pause_xon); 848e659b6b4SIvan Boule printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n", 849e659b6b4SIvan Boule stats->tx_pause_xoff, stats->tx_pause_xon); 850e659b6b4SIvan Boule } 851e659b6b4SIvan Boule 852af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 853af75078fSIntel if (port->rx_stream) 854013af9b6SIntel pkt_burst_stats_display("RX", 855013af9b6SIntel &port->rx_stream->rx_burst_stats); 856af75078fSIntel if (port->tx_stream) 857013af9b6SIntel pkt_burst_stats_display("TX", 858013af9b6SIntel &port->tx_stream->tx_burst_stats); 859af75078fSIntel #endif 860af75078fSIntel /* stats fdir */ 861af75078fSIntel if (fdir_conf.mode != RTE_FDIR_MODE_NONE) 862013af9b6SIntel printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n", 863af75078fSIntel stats->fdirmiss, 864af75078fSIntel stats->fdirmatch); 865af75078fSIntel 866013af9b6SIntel if (port->rx_queue_stats_mapping_enabled) { 867013af9b6SIntel printf("\n"); 868013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 869013af9b6SIntel printf(" Stats reg %2d RX-packets:%14"PRIu64 870013af9b6SIntel " RX-errors:%14"PRIu64 871013af9b6SIntel " RX-bytes:%14"PRIu64"\n", 872013af9b6SIntel i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 873013af9b6SIntel } 874013af9b6SIntel printf("\n"); 875013af9b6SIntel } 876013af9b6SIntel if (port->tx_queue_stats_mapping_enabled) { 877013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 878013af9b6SIntel printf(" Stats reg %2d TX-packets:%14"PRIu64 879013af9b6SIntel " TX-bytes:%14"PRIu64"\n", 880013af9b6SIntel i, stats->q_opackets[i], stats->q_obytes[i]); 881013af9b6SIntel } 882013af9b6SIntel } 883013af9b6SIntel 884af75078fSIntel printf(" %s--------------------------------%s\n", 885af75078fSIntel fwd_stats_border, fwd_stats_border); 886af75078fSIntel } 887af75078fSIntel 888af75078fSIntel static void 889af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 890af75078fSIntel { 891af75078fSIntel struct fwd_stream *fs; 892af75078fSIntel static const char *fwd_top_stats_border = "-------"; 893af75078fSIntel 894af75078fSIntel fs = fwd_streams[stream_id]; 895af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 896af75078fSIntel (fs->fwd_dropped == 0)) 897af75078fSIntel return; 898af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 899af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 900af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 901af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 902af75078fSIntel printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 903af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 904af75078fSIntel 905af75078fSIntel /* if checksum mode */ 906af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 907013af9b6SIntel printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 908013af9b6SIntel "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 909af75078fSIntel } 910af75078fSIntel 911af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 912af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 913af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 914af75078fSIntel #endif 915af75078fSIntel } 916af75078fSIntel 917af75078fSIntel static void 9187741e4cfSIntel flush_fwd_rx_queues(void) 919af75078fSIntel { 920af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 921af75078fSIntel portid_t rxp; 9227741e4cfSIntel portid_t port_id; 923af75078fSIntel queueid_t rxq; 924af75078fSIntel uint16_t nb_rx; 925af75078fSIntel uint16_t i; 926af75078fSIntel uint8_t j; 927af75078fSIntel 928af75078fSIntel for (j = 0; j < 2; j++) { 9297741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 930af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 9317741e4cfSIntel port_id = fwd_ports_ids[rxp]; 932af75078fSIntel do { 9337741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 934013af9b6SIntel pkts_burst, MAX_PKT_BURST); 935af75078fSIntel for (i = 0; i < nb_rx; i++) 936af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 937af75078fSIntel } while (nb_rx > 0); 938af75078fSIntel } 939af75078fSIntel } 940af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 941af75078fSIntel } 942af75078fSIntel } 943af75078fSIntel 944af75078fSIntel static void 945af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 946af75078fSIntel { 947af75078fSIntel struct fwd_stream **fsm; 948af75078fSIntel streamid_t nb_fs; 949af75078fSIntel streamid_t sm_id; 950af75078fSIntel 951af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 952af75078fSIntel nb_fs = fc->stream_nb; 953af75078fSIntel do { 954af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 955af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 956af75078fSIntel } while (! fc->stopped); 957af75078fSIntel } 958af75078fSIntel 959af75078fSIntel static int 960af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 961af75078fSIntel { 962af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 963af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 964af75078fSIntel return 0; 965af75078fSIntel } 966af75078fSIntel 967af75078fSIntel /* 968af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 969af75078fSIntel * Used to start communication flows in network loopback test configurations. 970af75078fSIntel */ 971af75078fSIntel static int 972af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 973af75078fSIntel { 974af75078fSIntel struct fwd_lcore *fwd_lc; 975af75078fSIntel struct fwd_lcore tmp_lcore; 976af75078fSIntel 977af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 978af75078fSIntel tmp_lcore = *fwd_lc; 979af75078fSIntel tmp_lcore.stopped = 1; 980af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 981af75078fSIntel return 0; 982af75078fSIntel } 983af75078fSIntel 984af75078fSIntel /* 985af75078fSIntel * Launch packet forwarding: 986af75078fSIntel * - Setup per-port forwarding context. 987af75078fSIntel * - launch logical cores with their forwarding configuration. 988af75078fSIntel */ 989af75078fSIntel static void 990af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 991af75078fSIntel { 992af75078fSIntel port_fwd_begin_t port_fwd_begin; 993af75078fSIntel unsigned int i; 994af75078fSIntel unsigned int lc_id; 995af75078fSIntel int diag; 996af75078fSIntel 997af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 998af75078fSIntel if (port_fwd_begin != NULL) { 999af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1000af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1001af75078fSIntel } 1002af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1003af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1004af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1005af75078fSIntel fwd_lcores[i]->stopped = 0; 1006af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1007af75078fSIntel fwd_lcores[i], lc_id); 1008af75078fSIntel if (diag != 0) 1009af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1010af75078fSIntel lc_id, diag); 1011af75078fSIntel } 1012af75078fSIntel } 1013af75078fSIntel } 1014af75078fSIntel 1015af75078fSIntel /* 1016af75078fSIntel * Launch packet forwarding configuration. 1017af75078fSIntel */ 1018af75078fSIntel void 1019af75078fSIntel start_packet_forwarding(int with_tx_first) 1020af75078fSIntel { 1021af75078fSIntel port_fwd_begin_t port_fwd_begin; 1022af75078fSIntel port_fwd_end_t port_fwd_end; 1023af75078fSIntel struct rte_port *port; 1024af75078fSIntel unsigned int i; 1025af75078fSIntel portid_t pt_id; 1026af75078fSIntel streamid_t sm_id; 1027af75078fSIntel 1028ce8d5614SIntel if (all_ports_started() == 0) { 1029ce8d5614SIntel printf("Not all ports were started\n"); 1030ce8d5614SIntel return; 1031ce8d5614SIntel } 1032af75078fSIntel if (test_done == 0) { 1033af75078fSIntel printf("Packet forwarding already started\n"); 1034af75078fSIntel return; 1035af75078fSIntel } 10367741e4cfSIntel if(dcb_test) { 10377741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) { 10387741e4cfSIntel pt_id = fwd_ports_ids[i]; 10397741e4cfSIntel port = &ports[pt_id]; 10407741e4cfSIntel if (!port->dcb_flag) { 10417741e4cfSIntel printf("In DCB mode, all forwarding ports must " 10427741e4cfSIntel "be configured in this mode.\n"); 1043013af9b6SIntel return; 1044013af9b6SIntel } 10457741e4cfSIntel } 10467741e4cfSIntel if (nb_fwd_lcores == 1) { 10477741e4cfSIntel printf("In DCB mode,the nb forwarding cores " 10487741e4cfSIntel "should be larger than 1.\n"); 10497741e4cfSIntel return; 10507741e4cfSIntel } 10517741e4cfSIntel } 1052af75078fSIntel test_done = 0; 10537741e4cfSIntel 10547741e4cfSIntel if(!no_flush_rx) 10557741e4cfSIntel flush_fwd_rx_queues(); 10567741e4cfSIntel 1057af75078fSIntel fwd_config_setup(); 1058af75078fSIntel rxtx_config_display(); 1059af75078fSIntel 1060af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1061af75078fSIntel pt_id = fwd_ports_ids[i]; 1062af75078fSIntel port = &ports[pt_id]; 1063af75078fSIntel rte_eth_stats_get(pt_id, &port->stats); 1064af75078fSIntel port->tx_dropped = 0; 1065013af9b6SIntel 1066013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port); 1067af75078fSIntel } 1068af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1069af75078fSIntel fwd_streams[sm_id]->rx_packets = 0; 1070af75078fSIntel fwd_streams[sm_id]->tx_packets = 0; 1071af75078fSIntel fwd_streams[sm_id]->fwd_dropped = 0; 1072af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1073af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1074af75078fSIntel 1075af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1076af75078fSIntel memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1077af75078fSIntel sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1078af75078fSIntel memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1079af75078fSIntel sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1080af75078fSIntel #endif 1081af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1082af75078fSIntel fwd_streams[sm_id]->core_cycles = 0; 1083af75078fSIntel #endif 1084af75078fSIntel } 1085af75078fSIntel if (with_tx_first) { 1086af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1087af75078fSIntel if (port_fwd_begin != NULL) { 1088af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1089af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1090af75078fSIntel } 1091af75078fSIntel launch_packet_forwarding(run_one_txonly_burst_on_core); 1092af75078fSIntel rte_eal_mp_wait_lcore(); 1093af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1094af75078fSIntel if (port_fwd_end != NULL) { 1095af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1096af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1097af75078fSIntel } 1098af75078fSIntel } 1099af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1100af75078fSIntel } 1101af75078fSIntel 1102af75078fSIntel void 1103af75078fSIntel stop_packet_forwarding(void) 1104af75078fSIntel { 1105af75078fSIntel struct rte_eth_stats stats; 1106af75078fSIntel struct rte_port *port; 1107af75078fSIntel port_fwd_end_t port_fwd_end; 1108af75078fSIntel int i; 1109af75078fSIntel portid_t pt_id; 1110af75078fSIntel streamid_t sm_id; 1111af75078fSIntel lcoreid_t lc_id; 1112af75078fSIntel uint64_t total_recv; 1113af75078fSIntel uint64_t total_xmit; 1114af75078fSIntel uint64_t total_rx_dropped; 1115af75078fSIntel uint64_t total_tx_dropped; 1116af75078fSIntel uint64_t total_rx_nombuf; 1117af75078fSIntel uint64_t tx_dropped; 1118af75078fSIntel uint64_t rx_bad_ip_csum; 1119af75078fSIntel uint64_t rx_bad_l4_csum; 1120af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1121af75078fSIntel uint64_t fwd_cycles; 1122af75078fSIntel #endif 1123af75078fSIntel static const char *acc_stats_border = "+++++++++++++++"; 1124af75078fSIntel 1125ce8d5614SIntel if (all_ports_started() == 0) { 1126ce8d5614SIntel printf("Not all ports were started\n"); 1127ce8d5614SIntel return; 1128ce8d5614SIntel } 1129af75078fSIntel if (test_done) { 1130af75078fSIntel printf("Packet forwarding not started\n"); 1131af75078fSIntel return; 1132af75078fSIntel } 1133af75078fSIntel printf("Telling cores to stop..."); 1134af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1135af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1136af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1137af75078fSIntel rte_eal_mp_wait_lcore(); 1138af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1139af75078fSIntel if (port_fwd_end != NULL) { 1140af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1141af75078fSIntel pt_id = fwd_ports_ids[i]; 1142af75078fSIntel (*port_fwd_end)(pt_id); 1143af75078fSIntel } 1144af75078fSIntel } 1145af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1146af75078fSIntel fwd_cycles = 0; 1147af75078fSIntel #endif 1148af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1149af75078fSIntel if (cur_fwd_config.nb_fwd_streams > 1150af75078fSIntel cur_fwd_config.nb_fwd_ports) { 1151af75078fSIntel fwd_stream_stats_display(sm_id); 1152af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1153af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1154af75078fSIntel } else { 1155af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = 1156af75078fSIntel fwd_streams[sm_id]; 1157af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = 1158af75078fSIntel fwd_streams[sm_id]; 1159af75078fSIntel } 1160af75078fSIntel tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1161af75078fSIntel tx_dropped = (uint64_t) (tx_dropped + 1162af75078fSIntel fwd_streams[sm_id]->fwd_dropped); 1163af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1164af75078fSIntel 1165013af9b6SIntel rx_bad_ip_csum = 1166013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1167af75078fSIntel rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1168af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum); 1169013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1170013af9b6SIntel rx_bad_ip_csum; 1171af75078fSIntel 1172013af9b6SIntel rx_bad_l4_csum = 1173013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1174af75078fSIntel rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1175af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum); 1176013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1177013af9b6SIntel rx_bad_l4_csum; 1178af75078fSIntel 1179af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1180af75078fSIntel fwd_cycles = (uint64_t) (fwd_cycles + 1181af75078fSIntel fwd_streams[sm_id]->core_cycles); 1182af75078fSIntel #endif 1183af75078fSIntel } 1184af75078fSIntel total_recv = 0; 1185af75078fSIntel total_xmit = 0; 1186af75078fSIntel total_rx_dropped = 0; 1187af75078fSIntel total_tx_dropped = 0; 1188af75078fSIntel total_rx_nombuf = 0; 11897741e4cfSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1190af75078fSIntel pt_id = fwd_ports_ids[i]; 1191af75078fSIntel 1192af75078fSIntel port = &ports[pt_id]; 1193af75078fSIntel rte_eth_stats_get(pt_id, &stats); 1194af75078fSIntel stats.ipackets -= port->stats.ipackets; 1195af75078fSIntel port->stats.ipackets = 0; 1196af75078fSIntel stats.opackets -= port->stats.opackets; 1197af75078fSIntel port->stats.opackets = 0; 1198af75078fSIntel stats.ibytes -= port->stats.ibytes; 1199af75078fSIntel port->stats.ibytes = 0; 1200af75078fSIntel stats.obytes -= port->stats.obytes; 1201af75078fSIntel port->stats.obytes = 0; 120270bdb186SIvan Boule stats.imissed -= port->stats.imissed; 120370bdb186SIvan Boule port->stats.imissed = 0; 1204af75078fSIntel stats.oerrors -= port->stats.oerrors; 1205af75078fSIntel port->stats.oerrors = 0; 1206af75078fSIntel stats.rx_nombuf -= port->stats.rx_nombuf; 1207af75078fSIntel port->stats.rx_nombuf = 0; 1208af75078fSIntel stats.fdirmatch -= port->stats.fdirmatch; 1209af75078fSIntel port->stats.rx_nombuf = 0; 1210af75078fSIntel stats.fdirmiss -= port->stats.fdirmiss; 1211af75078fSIntel port->stats.rx_nombuf = 0; 1212af75078fSIntel 1213af75078fSIntel total_recv += stats.ipackets; 1214af75078fSIntel total_xmit += stats.opackets; 121570bdb186SIvan Boule total_rx_dropped += stats.imissed; 1216af75078fSIntel total_tx_dropped += port->tx_dropped; 1217af75078fSIntel total_rx_nombuf += stats.rx_nombuf; 1218af75078fSIntel 1219af75078fSIntel fwd_port_stats_display(pt_id, &stats); 1220af75078fSIntel } 1221af75078fSIntel printf("\n %s Accumulated forward statistics for all ports" 1222af75078fSIntel "%s\n", 1223af75078fSIntel acc_stats_border, acc_stats_border); 1224af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1225af75078fSIntel "%-"PRIu64"\n" 1226af75078fSIntel " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1227af75078fSIntel "%-"PRIu64"\n", 1228af75078fSIntel total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1229af75078fSIntel total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1230af75078fSIntel if (total_rx_nombuf > 0) 1231af75078fSIntel printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1232af75078fSIntel printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1233af75078fSIntel "%s\n", 1234af75078fSIntel acc_stats_border, acc_stats_border); 1235af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1236af75078fSIntel if (total_recv > 0) 1237af75078fSIntel printf("\n CPU cycles/packet=%u (total cycles=" 1238af75078fSIntel "%"PRIu64" / total RX packets=%"PRIu64")\n", 1239af75078fSIntel (unsigned int)(fwd_cycles / total_recv), 1240af75078fSIntel fwd_cycles, total_recv); 1241af75078fSIntel #endif 1242af75078fSIntel printf("\nDone.\n"); 1243af75078fSIntel test_done = 1; 1244af75078fSIntel } 1245af75078fSIntel 1246cfae07fdSOuyang Changchun void 1247cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1248cfae07fdSOuyang Changchun { 1249cfae07fdSOuyang Changchun if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) 1250cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1251cfae07fdSOuyang Changchun } 1252cfae07fdSOuyang Changchun 1253cfae07fdSOuyang Changchun void 1254cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1255cfae07fdSOuyang Changchun { 1256cfae07fdSOuyang Changchun if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1257cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1258cfae07fdSOuyang Changchun } 1259cfae07fdSOuyang Changchun 1260ce8d5614SIntel static int 1261ce8d5614SIntel all_ports_started(void) 1262ce8d5614SIntel { 1263ce8d5614SIntel portid_t pi; 1264ce8d5614SIntel struct rte_port *port; 1265ce8d5614SIntel 1266ce8d5614SIntel for (pi = 0; pi < nb_ports; pi++) { 1267ce8d5614SIntel port = &ports[pi]; 1268ce8d5614SIntel /* Check if there is a port which is not started */ 1269ce8d5614SIntel if (port->port_status != RTE_PORT_STARTED) 1270ce8d5614SIntel return 0; 1271ce8d5614SIntel } 1272ce8d5614SIntel 1273ce8d5614SIntel /* No port is not started */ 1274ce8d5614SIntel return 1; 1275ce8d5614SIntel } 1276ce8d5614SIntel 1277148f963fSBruce Richardson int 1278ce8d5614SIntel start_port(portid_t pid) 1279ce8d5614SIntel { 1280ce8d5614SIntel int diag, need_check_link_status = 0; 1281ce8d5614SIntel portid_t pi; 1282ce8d5614SIntel queueid_t qi; 1283ce8d5614SIntel struct rte_port *port; 12842950a769SDeclan Doherty struct ether_addr mac_addr; 1285ce8d5614SIntel 1286ce8d5614SIntel if (test_done == 0) { 1287ce8d5614SIntel printf("Please stop forwarding first\n"); 1288148f963fSBruce Richardson return -1; 1289ce8d5614SIntel } 1290ce8d5614SIntel 1291ce8d5614SIntel if (init_fwd_streams() < 0) { 1292ce8d5614SIntel printf("Fail from init_fwd_streams()\n"); 1293148f963fSBruce Richardson return -1; 1294ce8d5614SIntel } 1295ce8d5614SIntel 1296ce8d5614SIntel if(dcb_config) 1297ce8d5614SIntel dcb_test = 1; 1298ce8d5614SIntel for (pi = 0; pi < nb_ports; pi++) { 1299ce8d5614SIntel if (pid < nb_ports && pid != pi) 1300ce8d5614SIntel continue; 1301ce8d5614SIntel 1302ce8d5614SIntel port = &ports[pi]; 1303ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1304ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1305ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1306ce8d5614SIntel continue; 1307ce8d5614SIntel } 1308ce8d5614SIntel 1309ce8d5614SIntel if (port->need_reconfig > 0) { 1310ce8d5614SIntel port->need_reconfig = 0; 1311ce8d5614SIntel 13125706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 131320a0286fSLiu Xiaofeng port->socket_id); 1314ce8d5614SIntel /* configure port */ 1315ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1316ce8d5614SIntel &(port->dev_conf)); 1317ce8d5614SIntel if (diag != 0) { 1318ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1319ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1320ce8d5614SIntel printf("Port %d can not be set back " 1321ce8d5614SIntel "to stopped\n", pi); 1322ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1323ce8d5614SIntel /* try to reconfigure port next time */ 1324ce8d5614SIntel port->need_reconfig = 1; 1325148f963fSBruce Richardson return -1; 1326ce8d5614SIntel } 1327ce8d5614SIntel } 1328ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1329ce8d5614SIntel port->need_reconfig_queues = 0; 1330ce8d5614SIntel /* setup tx queues */ 1331ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1332b6ea6408SIntel if ((numa_support) && 1333b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1334b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1335b6ea6408SIntel nb_txd,txring_numa[pi], 1336b6ea6408SIntel &(port->tx_conf)); 1337b6ea6408SIntel else 1338b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1339b6ea6408SIntel nb_txd,port->socket_id, 1340b6ea6408SIntel &(port->tx_conf)); 1341b6ea6408SIntel 1342ce8d5614SIntel if (diag == 0) 1343ce8d5614SIntel continue; 1344ce8d5614SIntel 1345ce8d5614SIntel /* Fail to setup tx queue, return */ 1346ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1347ce8d5614SIntel RTE_PORT_HANDLING, 1348ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1349ce8d5614SIntel printf("Port %d can not be set back " 1350ce8d5614SIntel "to stopped\n", pi); 1351ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1352ce8d5614SIntel /* try to reconfigure queues next time */ 1353ce8d5614SIntel port->need_reconfig_queues = 1; 1354148f963fSBruce Richardson return -1; 1355ce8d5614SIntel } 1356ce8d5614SIntel /* setup rx queues */ 1357ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1358b6ea6408SIntel if ((numa_support) && 1359b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1360b6ea6408SIntel struct rte_mempool * mp = 1361b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1362b6ea6408SIntel if (mp == NULL) { 1363b6ea6408SIntel printf("Failed to setup RX queue:" 1364b6ea6408SIntel "No mempool allocation" 1365b6ea6408SIntel "on the socket %d\n", 1366b6ea6408SIntel rxring_numa[pi]); 1367148f963fSBruce Richardson return -1; 1368b6ea6408SIntel } 1369b6ea6408SIntel 1370b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1371b6ea6408SIntel nb_rxd,rxring_numa[pi], 1372b6ea6408SIntel &(port->rx_conf),mp); 1373b6ea6408SIntel } 1374b6ea6408SIntel else 1375b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1376b6ea6408SIntel nb_rxd,port->socket_id, 1377b6ea6408SIntel &(port->rx_conf), 1378ce8d5614SIntel mbuf_pool_find(port->socket_id)); 1379b6ea6408SIntel 1380ce8d5614SIntel if (diag == 0) 1381ce8d5614SIntel continue; 1382ce8d5614SIntel 1383b6ea6408SIntel 1384ce8d5614SIntel /* Fail to setup rx queue, return */ 1385ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1386ce8d5614SIntel RTE_PORT_HANDLING, 1387ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1388ce8d5614SIntel printf("Port %d can not be set back " 1389ce8d5614SIntel "to stopped\n", pi); 1390ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1391ce8d5614SIntel /* try to reconfigure queues next time */ 1392ce8d5614SIntel port->need_reconfig_queues = 1; 1393148f963fSBruce Richardson return -1; 1394ce8d5614SIntel } 1395ce8d5614SIntel } 1396ce8d5614SIntel /* start port */ 1397ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1398ce8d5614SIntel printf("Fail to start port %d\n", pi); 1399ce8d5614SIntel 1400ce8d5614SIntel /* Fail to setup rx queue, return */ 1401ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1402ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1403ce8d5614SIntel printf("Port %d can not be set back to " 1404ce8d5614SIntel "stopped\n", pi); 1405ce8d5614SIntel continue; 1406ce8d5614SIntel } 1407ce8d5614SIntel 1408ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1409ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1410ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1411ce8d5614SIntel 14122950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1413d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 14142950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 14152950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 14162950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1417d8c89163SZijie Pan 1418ce8d5614SIntel /* at least one port started, need checking link status */ 1419ce8d5614SIntel need_check_link_status = 1; 1420ce8d5614SIntel } 1421ce8d5614SIntel 1422bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1423ce8d5614SIntel check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1424ce8d5614SIntel else 1425ce8d5614SIntel printf("Please stop the ports first\n"); 1426ce8d5614SIntel 1427ce8d5614SIntel printf("Done\n"); 1428148f963fSBruce Richardson return 0; 1429ce8d5614SIntel } 1430ce8d5614SIntel 1431ce8d5614SIntel void 1432ce8d5614SIntel stop_port(portid_t pid) 1433ce8d5614SIntel { 1434ce8d5614SIntel portid_t pi; 1435ce8d5614SIntel struct rte_port *port; 1436ce8d5614SIntel int need_check_link_status = 0; 1437ce8d5614SIntel 1438ce8d5614SIntel if (test_done == 0) { 1439ce8d5614SIntel printf("Please stop forwarding first\n"); 1440ce8d5614SIntel return; 1441ce8d5614SIntel } 1442ce8d5614SIntel if (dcb_test) { 1443ce8d5614SIntel dcb_test = 0; 1444ce8d5614SIntel dcb_config = 0; 1445ce8d5614SIntel } 1446ce8d5614SIntel printf("Stopping ports...\n"); 1447ce8d5614SIntel 1448ce8d5614SIntel for (pi = 0; pi < nb_ports; pi++) { 1449ce8d5614SIntel if (pid < nb_ports && pid != pi) 1450ce8d5614SIntel continue; 1451ce8d5614SIntel 1452ce8d5614SIntel port = &ports[pi]; 1453ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1454ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1455ce8d5614SIntel continue; 1456ce8d5614SIntel 1457ce8d5614SIntel rte_eth_dev_stop(pi); 1458ce8d5614SIntel 1459ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1460ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1461ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1462ce8d5614SIntel need_check_link_status = 1; 1463ce8d5614SIntel } 1464bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1465ce8d5614SIntel check_all_ports_link_status(nb_ports, RTE_PORT_ALL); 1466ce8d5614SIntel 1467ce8d5614SIntel printf("Done\n"); 1468ce8d5614SIntel } 1469ce8d5614SIntel 1470ce8d5614SIntel void 1471ce8d5614SIntel close_port(portid_t pid) 1472ce8d5614SIntel { 1473ce8d5614SIntel portid_t pi; 1474ce8d5614SIntel struct rte_port *port; 1475ce8d5614SIntel 1476ce8d5614SIntel if (test_done == 0) { 1477ce8d5614SIntel printf("Please stop forwarding first\n"); 1478ce8d5614SIntel return; 1479ce8d5614SIntel } 1480ce8d5614SIntel 1481ce8d5614SIntel printf("Closing ports...\n"); 1482ce8d5614SIntel 1483ce8d5614SIntel for (pi = 0; pi < nb_ports; pi++) { 1484ce8d5614SIntel if (pid < nb_ports && pid != pi) 1485ce8d5614SIntel continue; 1486ce8d5614SIntel 1487ce8d5614SIntel port = &ports[pi]; 1488ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1489ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1490ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1491ce8d5614SIntel continue; 1492ce8d5614SIntel } 1493ce8d5614SIntel 1494ce8d5614SIntel rte_eth_dev_close(pi); 1495ce8d5614SIntel 1496ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1497ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1498ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1499ce8d5614SIntel } 1500ce8d5614SIntel 1501ce8d5614SIntel printf("Done\n"); 1502ce8d5614SIntel } 1503ce8d5614SIntel 1504ce8d5614SIntel int 1505ce8d5614SIntel all_ports_stopped(void) 1506ce8d5614SIntel { 1507ce8d5614SIntel portid_t pi; 1508ce8d5614SIntel struct rte_port *port; 1509ce8d5614SIntel 1510ce8d5614SIntel for (pi = 0; pi < nb_ports; pi++) { 1511ce8d5614SIntel port = &ports[pi]; 1512ce8d5614SIntel if (port->port_status != RTE_PORT_STOPPED) 1513ce8d5614SIntel return 0; 1514ce8d5614SIntel } 1515ce8d5614SIntel 1516ce8d5614SIntel return 1; 1517ce8d5614SIntel } 1518ce8d5614SIntel 15195f4ec54fSChen Jing D(Mark) int 15205f4ec54fSChen Jing D(Mark) port_is_started(portid_t port_id) 15215f4ec54fSChen Jing D(Mark) { 15225f4ec54fSChen Jing D(Mark) if (port_id_is_invalid(port_id)) 15235f4ec54fSChen Jing D(Mark) return -1; 15245f4ec54fSChen Jing D(Mark) 15255f4ec54fSChen Jing D(Mark) if (ports[port_id].port_status != RTE_PORT_STARTED) 15265f4ec54fSChen Jing D(Mark) return 0; 15275f4ec54fSChen Jing D(Mark) 15285f4ec54fSChen Jing D(Mark) return 1; 15295f4ec54fSChen Jing D(Mark) } 15305f4ec54fSChen Jing D(Mark) 1531af75078fSIntel void 1532af75078fSIntel pmd_test_exit(void) 1533af75078fSIntel { 1534af75078fSIntel portid_t pt_id; 1535af75078fSIntel 1536af75078fSIntel for (pt_id = 0; pt_id < nb_ports; pt_id++) { 1537af75078fSIntel printf("Stopping port %d...", pt_id); 1538af75078fSIntel fflush(stdout); 1539af75078fSIntel rte_eth_dev_close(pt_id); 1540af75078fSIntel printf("done\n"); 1541af75078fSIntel } 1542af75078fSIntel printf("bye...\n"); 1543af75078fSIntel } 1544af75078fSIntel 1545af75078fSIntel typedef void (*cmd_func_t)(void); 1546af75078fSIntel struct pmd_test_command { 1547af75078fSIntel const char *cmd_name; 1548af75078fSIntel cmd_func_t cmd_func; 1549af75078fSIntel }; 1550af75078fSIntel 1551af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1552af75078fSIntel 1553ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1554af75078fSIntel static void 1555ce8d5614SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) 1556af75078fSIntel { 1557ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1558ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1559ce8d5614SIntel uint8_t portid, count, all_ports_up, print_flag = 0; 1560ce8d5614SIntel struct rte_eth_link link; 1561ce8d5614SIntel 1562ce8d5614SIntel printf("Checking link statuses...\n"); 1563ce8d5614SIntel fflush(stdout); 1564ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1565ce8d5614SIntel all_ports_up = 1; 1566ce8d5614SIntel for (portid = 0; portid < port_num; portid++) { 1567ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1568ce8d5614SIntel continue; 1569ce8d5614SIntel memset(&link, 0, sizeof(link)); 1570ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1571ce8d5614SIntel /* print link status if flag set */ 1572ce8d5614SIntel if (print_flag == 1) { 1573ce8d5614SIntel if (link.link_status) 1574ce8d5614SIntel printf("Port %d Link Up - speed %u " 1575ce8d5614SIntel "Mbps - %s\n", (uint8_t)portid, 1576ce8d5614SIntel (unsigned)link.link_speed, 1577ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1578ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 1579ce8d5614SIntel else 1580ce8d5614SIntel printf("Port %d Link Down\n", 1581ce8d5614SIntel (uint8_t)portid); 1582ce8d5614SIntel continue; 1583ce8d5614SIntel } 1584ce8d5614SIntel /* clear all_ports_up flag if any link down */ 1585ce8d5614SIntel if (link.link_status == 0) { 1586ce8d5614SIntel all_ports_up = 0; 1587ce8d5614SIntel break; 1588ce8d5614SIntel } 1589ce8d5614SIntel } 1590ce8d5614SIntel /* after finally printing all link status, get out */ 1591ce8d5614SIntel if (print_flag == 1) 1592ce8d5614SIntel break; 1593ce8d5614SIntel 1594ce8d5614SIntel if (all_ports_up == 0) { 1595ce8d5614SIntel fflush(stdout); 1596ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1597ce8d5614SIntel } 1598ce8d5614SIntel 1599ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1600ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1601ce8d5614SIntel print_flag = 1; 1602ce8d5614SIntel } 1603ce8d5614SIntel } 1604af75078fSIntel } 1605af75078fSIntel 1606013af9b6SIntel static int 1607013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1608af75078fSIntel { 1609013af9b6SIntel uint16_t i; 1610af75078fSIntel int diag; 1611013af9b6SIntel uint8_t mapping_found = 0; 1612af75078fSIntel 1613013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1614013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1615013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1616013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1617013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1618013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1619013af9b6SIntel if (diag != 0) 1620013af9b6SIntel return diag; 1621013af9b6SIntel mapping_found = 1; 1622af75078fSIntel } 1623013af9b6SIntel } 1624013af9b6SIntel if (mapping_found) 1625013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1626013af9b6SIntel return 0; 1627013af9b6SIntel } 1628013af9b6SIntel 1629013af9b6SIntel static int 1630013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1631013af9b6SIntel { 1632013af9b6SIntel uint16_t i; 1633013af9b6SIntel int diag; 1634013af9b6SIntel uint8_t mapping_found = 0; 1635013af9b6SIntel 1636013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1637013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1638013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1639013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1640013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1641013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1642013af9b6SIntel if (diag != 0) 1643013af9b6SIntel return diag; 1644013af9b6SIntel mapping_found = 1; 1645013af9b6SIntel } 1646013af9b6SIntel } 1647013af9b6SIntel if (mapping_found) 1648013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 1649013af9b6SIntel return 0; 1650013af9b6SIntel } 1651013af9b6SIntel 1652013af9b6SIntel static void 1653013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1654013af9b6SIntel { 1655013af9b6SIntel int diag = 0; 1656013af9b6SIntel 1657013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1658af75078fSIntel if (diag != 0) { 1659013af9b6SIntel if (diag == -ENOTSUP) { 1660013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 1661013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 1662013af9b6SIntel } 1663013af9b6SIntel else 1664013af9b6SIntel rte_exit(EXIT_FAILURE, 1665013af9b6SIntel "set_tx_queue_stats_mapping_registers " 1666013af9b6SIntel "failed for port id=%d diag=%d\n", 1667af75078fSIntel pi, diag); 1668af75078fSIntel } 1669013af9b6SIntel 1670013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 1671af75078fSIntel if (diag != 0) { 1672013af9b6SIntel if (diag == -ENOTSUP) { 1673013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 1674013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 1675013af9b6SIntel } 1676013af9b6SIntel else 1677013af9b6SIntel rte_exit(EXIT_FAILURE, 1678013af9b6SIntel "set_rx_queue_stats_mapping_registers " 1679013af9b6SIntel "failed for port id=%d diag=%d\n", 1680af75078fSIntel pi, diag); 1681af75078fSIntel } 1682af75078fSIntel } 1683af75078fSIntel 1684013af9b6SIntel void 1685013af9b6SIntel init_port_config(void) 1686013af9b6SIntel { 1687013af9b6SIntel portid_t pid; 1688013af9b6SIntel struct rte_port *port; 1689013af9b6SIntel 1690013af9b6SIntel for (pid = 0; pid < nb_ports; pid++) { 1691013af9b6SIntel port = &ports[pid]; 1692013af9b6SIntel port->dev_conf.rxmode = rx_mode; 1693013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 16943ce690d3SBruce Richardson if (nb_rxq > 1) { 1695013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1696013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1697af75078fSIntel } else { 1698013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1699013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1700af75078fSIntel } 17013ce690d3SBruce Richardson 17023ce690d3SBruce Richardson /* In SR-IOV mode, RSS mode is not available */ 17033ce690d3SBruce Richardson if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 17043ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 17053ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 17063ce690d3SBruce Richardson else 17073ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 17083ce690d3SBruce Richardson } 17093ce690d3SBruce Richardson 1710013af9b6SIntel port->rx_conf.rx_thresh = rx_thresh; 1711013af9b6SIntel port->rx_conf.rx_free_thresh = rx_free_thresh; 1712013af9b6SIntel port->rx_conf.rx_drop_en = rx_drop_en; 1713013af9b6SIntel port->tx_conf.tx_thresh = tx_thresh; 1714013af9b6SIntel port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1715013af9b6SIntel port->tx_conf.tx_free_thresh = tx_free_thresh; 1716013af9b6SIntel port->tx_conf.txq_flags = txq_flags; 1717013af9b6SIntel 1718013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 1719013af9b6SIntel 1720013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 17217b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 17227b7e5ba7SIntel rte_eth_dev_bypass_init(pid); 17237b7e5ba7SIntel #endif 1724013af9b6SIntel } 1725013af9b6SIntel } 1726013af9b6SIntel 1727013af9b6SIntel const uint16_t vlan_tags[] = { 1728013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1729013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1730013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1731013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 1732013af9b6SIntel }; 1733013af9b6SIntel 1734013af9b6SIntel static int 1735013af9b6SIntel get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf) 1736013af9b6SIntel { 1737013af9b6SIntel uint8_t i; 1738af75078fSIntel 1739af75078fSIntel /* 1740013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 1741013af9b6SIntel * given above, and the number of traffic classes available for use. 1742af75078fSIntel */ 1743013af9b6SIntel if (dcb_conf->dcb_mode == DCB_VT_ENABLED) { 1744013af9b6SIntel struct rte_eth_vmdq_dcb_conf vmdq_rx_conf; 1745013af9b6SIntel struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf; 1746013af9b6SIntel 1747013af9b6SIntel /* VMDQ+DCB RX and TX configrations */ 1748013af9b6SIntel vmdq_rx_conf.enable_default_pool = 0; 1749013af9b6SIntel vmdq_rx_conf.default_pool = 0; 1750013af9b6SIntel vmdq_rx_conf.nb_queue_pools = 1751013af9b6SIntel (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1752013af9b6SIntel vmdq_tx_conf.nb_queue_pools = 1753013af9b6SIntel (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1754013af9b6SIntel 1755013af9b6SIntel vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1756013af9b6SIntel for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) { 1757013af9b6SIntel vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1758013af9b6SIntel vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools); 1759af75078fSIntel } 1760013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 1761013af9b6SIntel vmdq_rx_conf.dcb_queue[i] = i; 1762013af9b6SIntel vmdq_tx_conf.dcb_queue[i] = i; 1763013af9b6SIntel } 1764013af9b6SIntel 1765013af9b6SIntel /*set DCB mode of RX and TX of multiple queues*/ 176632e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 176732e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 1768013af9b6SIntel if (dcb_conf->pfc_en) 1769013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1770013af9b6SIntel else 1771013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1772013af9b6SIntel 1773013af9b6SIntel (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf, 1774013af9b6SIntel sizeof(struct rte_eth_vmdq_dcb_conf))); 1775013af9b6SIntel (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf, 1776013af9b6SIntel sizeof(struct rte_eth_vmdq_dcb_tx_conf))); 1777013af9b6SIntel } 1778013af9b6SIntel else { 1779013af9b6SIntel struct rte_eth_dcb_rx_conf rx_conf; 1780013af9b6SIntel struct rte_eth_dcb_tx_conf tx_conf; 1781013af9b6SIntel 1782013af9b6SIntel /* queue mapping configuration of DCB RX and TX */ 1783013af9b6SIntel if (dcb_conf->num_tcs == ETH_4_TCS) 1784013af9b6SIntel dcb_q_mapping = DCB_4_TCS_Q_MAPPING; 1785013af9b6SIntel else 1786013af9b6SIntel dcb_q_mapping = DCB_8_TCS_Q_MAPPING; 1787013af9b6SIntel 1788013af9b6SIntel rx_conf.nb_tcs = dcb_conf->num_tcs; 1789013af9b6SIntel tx_conf.nb_tcs = dcb_conf->num_tcs; 1790013af9b6SIntel 1791013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ 1792013af9b6SIntel rx_conf.dcb_queue[i] = i; 1793013af9b6SIntel tx_conf.dcb_queue[i] = i; 1794013af9b6SIntel } 179532e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB; 179632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 1797013af9b6SIntel if (dcb_conf->pfc_en) 1798013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT; 1799013af9b6SIntel else 1800013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1801013af9b6SIntel 1802013af9b6SIntel (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf, 1803013af9b6SIntel sizeof(struct rte_eth_dcb_rx_conf))); 1804013af9b6SIntel (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf, 1805013af9b6SIntel sizeof(struct rte_eth_dcb_tx_conf))); 1806013af9b6SIntel } 1807013af9b6SIntel 1808013af9b6SIntel return 0; 1809013af9b6SIntel } 1810013af9b6SIntel 1811013af9b6SIntel int 1812013af9b6SIntel init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf) 1813013af9b6SIntel { 1814013af9b6SIntel struct rte_eth_conf port_conf; 1815013af9b6SIntel struct rte_port *rte_port; 1816013af9b6SIntel int retval; 1817013af9b6SIntel uint16_t nb_vlan; 1818013af9b6SIntel uint16_t i; 1819013af9b6SIntel 1820013af9b6SIntel /* rxq and txq configuration in dcb mode */ 1821013af9b6SIntel nb_rxq = 128; 1822013af9b6SIntel nb_txq = 128; 1823013af9b6SIntel rx_free_thresh = 64; 1824013af9b6SIntel 1825013af9b6SIntel memset(&port_conf,0,sizeof(struct rte_eth_conf)); 1826013af9b6SIntel /* Enter DCB configuration status */ 1827013af9b6SIntel dcb_config = 1; 1828013af9b6SIntel 1829013af9b6SIntel nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); 1830013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 1831013af9b6SIntel retval = get_eth_dcb_conf(&port_conf, dcb_conf); 1832013af9b6SIntel if (retval < 0) 1833013af9b6SIntel return retval; 1834013af9b6SIntel 1835013af9b6SIntel rte_port = &ports[pid]; 1836013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf)); 1837013af9b6SIntel 1838013af9b6SIntel rte_port->rx_conf.rx_thresh = rx_thresh; 1839013af9b6SIntel rte_port->rx_conf.rx_free_thresh = rx_free_thresh; 1840013af9b6SIntel rte_port->tx_conf.tx_thresh = tx_thresh; 1841013af9b6SIntel rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1842013af9b6SIntel rte_port->tx_conf.tx_free_thresh = tx_free_thresh; 1843013af9b6SIntel /* VLAN filter */ 1844013af9b6SIntel rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 1845013af9b6SIntel for (i = 0; i < nb_vlan; i++){ 1846013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 1847013af9b6SIntel } 1848013af9b6SIntel 1849013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1850013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 1851013af9b6SIntel 18527741e4cfSIntel rte_port->dcb_flag = 1; 18537741e4cfSIntel 1854013af9b6SIntel return 0; 1855af75078fSIntel } 1856af75078fSIntel 1857af75078fSIntel #ifdef RTE_EXEC_ENV_BAREMETAL 1858af75078fSIntel #define main _main 1859af75078fSIntel #endif 1860af75078fSIntel 1861af75078fSIntel int 1862af75078fSIntel main(int argc, char** argv) 1863af75078fSIntel { 1864af75078fSIntel int diag; 1865013af9b6SIntel uint8_t port_id; 1866af75078fSIntel 1867af75078fSIntel diag = rte_eal_init(argc, argv); 1868af75078fSIntel if (diag < 0) 1869af75078fSIntel rte_panic("Cannot init EAL\n"); 1870af75078fSIntel 1871af75078fSIntel nb_ports = (portid_t) rte_eth_dev_count(); 1872af75078fSIntel if (nb_ports == 0) 1873013af9b6SIntel rte_exit(EXIT_FAILURE, "No probed ethernet devices - " 1874013af9b6SIntel "check that " 1875af75078fSIntel "CONFIG_RTE_LIBRTE_IGB_PMD=y and that " 187669d22b8eSIntel "CONFIG_RTE_LIBRTE_EM_PMD=y and that " 1877af75078fSIntel "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your " 1878af75078fSIntel "configuration file\n"); 1879af75078fSIntel 1880af75078fSIntel set_def_fwd_config(); 1881af75078fSIntel if (nb_lcores == 0) 1882af75078fSIntel rte_panic("Empty set of forwarding logical cores - check the " 1883af75078fSIntel "core mask supplied in the command parameters\n"); 1884af75078fSIntel 1885af75078fSIntel argc -= diag; 1886af75078fSIntel argv += diag; 1887af75078fSIntel if (argc > 1) 1888af75078fSIntel launch_args_parse(argc, argv); 1889af75078fSIntel 1890af75078fSIntel if (nb_rxq > nb_txq) 1891af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 1892af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 1893af75078fSIntel nb_rxq, nb_txq); 1894af75078fSIntel 1895af75078fSIntel init_config(); 1896148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 1897148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 1898af75078fSIntel 1899ce8d5614SIntel /* set all ports to promiscuous mode by default */ 1900ce8d5614SIntel for (port_id = 0; port_id < nb_ports; port_id++) 1901ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 1902af75078fSIntel 19030d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 1904ca7feb22SCyril Chemparathy if (interactive == 1) { 1905ca7feb22SCyril Chemparathy if (auto_start) { 1906ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 1907ca7feb22SCyril Chemparathy start_packet_forwarding(0); 1908ca7feb22SCyril Chemparathy } 1909af75078fSIntel prompt(); 1910ca7feb22SCyril Chemparathy } else 19110d56cb81SThomas Monjalon #endif 19120d56cb81SThomas Monjalon { 1913af75078fSIntel char c; 1914af75078fSIntel int rc; 1915af75078fSIntel 1916af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 1917af75078fSIntel start_packet_forwarding(0); 1918af75078fSIntel printf("Press enter to exit\n"); 1919af75078fSIntel rc = read(0, &c, 1); 1920af75078fSIntel if (rc < 0) 1921af75078fSIntel return 1; 1922af75078fSIntel } 1923af75078fSIntel 1924af75078fSIntel return 0; 1925af75078fSIntel } 1926