/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
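/*
 * Note: most of the tunables above and below (nb_rxq/nb_txq, nb_rxd/nb_txd,
 * nb_pkt_per_burst, mb_mempool_cache, mbuf_data_size, param_total_num_mbufs)
 * can be overridden from the testpmd command line (e.g. --rxq/--txq,
 * --rxd/--txd, --burst, --mbcache, --mbuf-size, --total-num-mbufs); see the
 * option parsing in parameters.c for the authoritative list.
 */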
/* current configuration is in DCB mode or not; 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
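/*
 * mbuf_pool_create() builds one mbuf pool for the given socket.  Depending
 * on the configuration it tries, in order: Xen grant-table backed memory
 * (RTE_LIBRTE_PMD_XENVIRT), anonymous mappings (mp_anon != 0) and finally
 * the plain rte_pktmbuf_pool_create() wrapper.
 */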
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed, fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
				"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check if the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
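
/*
 * init_config() below: allocate the per-lcore forwarding contexts, size and
 * create the mbuf pools (one pool on socket 0 in UMA mode, one pool per
 * detected socket in NUMA mode), fetch device info for every probed port,
 * bind each forwarding lcore to the mbuf pool of its socket and finally
 * build the forwarding streams.
 */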
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

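	/*
	 * In NUMA mode one mbuf pool is created per detected socket below;
	 * when a total mbuf count was given on the command line the requested
	 * total is divided evenly between the ports.
	 */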
	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
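/*
 * launch_packet_forwarding() starts one forwarding loop per configured
 * lcore via rte_eal_remote_launch(); in interactive mode the master lcore
 * is skipped so that it stays available for the command line.
 */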
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

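	/*
	 * Re-derive the forwarding configuration and reset the per-port and
	 * per-stream counters below so that the statistics printed by
	 * stop_packet_forwarding() cover only this run.
	 */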
	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
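		/*
		 * The block below (re)creates the TX and RX queues, honouring
		 * the per-port ring NUMA mapping (txring_numa/rxring_numa)
		 * when NUMA support is enabled.
		 */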
(port->need_reconfig_queues > 0) { 1323ce8d5614SIntel port->need_reconfig_queues = 0; 1324ce8d5614SIntel /* setup tx queues */ 1325ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1326b6ea6408SIntel if ((numa_support) && 1327b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1328b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1329b6ea6408SIntel nb_txd,txring_numa[pi], 1330b6ea6408SIntel &(port->tx_conf)); 1331b6ea6408SIntel else 1332b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1333b6ea6408SIntel nb_txd,port->socket_id, 1334b6ea6408SIntel &(port->tx_conf)); 1335b6ea6408SIntel 1336ce8d5614SIntel if (diag == 0) 1337ce8d5614SIntel continue; 1338ce8d5614SIntel 1339ce8d5614SIntel /* Fail to setup tx queue, return */ 1340ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1341ce8d5614SIntel RTE_PORT_HANDLING, 1342ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1343ce8d5614SIntel printf("Port %d can not be set back " 1344ce8d5614SIntel "to stopped\n", pi); 1345ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1346ce8d5614SIntel /* try to reconfigure queues next time */ 1347ce8d5614SIntel port->need_reconfig_queues = 1; 1348148f963fSBruce Richardson return -1; 1349ce8d5614SIntel } 1350ce8d5614SIntel /* setup rx queues */ 1351ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1352b6ea6408SIntel if ((numa_support) && 1353b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1354b6ea6408SIntel struct rte_mempool * mp = 1355b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1356b6ea6408SIntel if (mp == NULL) { 1357b6ea6408SIntel printf("Failed to setup RX queue: " 1358b6ea6408SIntel "no mempool allocation " 1359b6ea6408SIntel "on the socket %d\n", 1360b6ea6408SIntel rxring_numa[pi]); 1361148f963fSBruce Richardson return -1; 1362b6ea6408SIntel } 1363b6ea6408SIntel 1364b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1365b6ea6408SIntel nb_rxd,rxring_numa[pi], 1366b6ea6408SIntel &(port->rx_conf),mp); 1367b6ea6408SIntel } 1368b6ea6408SIntel else 1369b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1370b6ea6408SIntel nb_rxd,port->socket_id, 1371b6ea6408SIntel &(port->rx_conf), 1372ce8d5614SIntel mbuf_pool_find(port->socket_id)); 1373b6ea6408SIntel 1374ce8d5614SIntel if (diag == 0) 1375ce8d5614SIntel continue; 1376ce8d5614SIntel 1377b6ea6408SIntel 1378ce8d5614SIntel /* Fail to setup rx queue, return */ 1379ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1380ce8d5614SIntel RTE_PORT_HANDLING, 1381ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1382ce8d5614SIntel printf("Port %d can not be set back " 1383ce8d5614SIntel "to stopped\n", pi); 1384ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1385ce8d5614SIntel /* try to reconfigure queues next time */ 1386ce8d5614SIntel port->need_reconfig_queues = 1; 1387148f963fSBruce Richardson return -1; 1388ce8d5614SIntel } 1389ce8d5614SIntel } 1390ce8d5614SIntel /* start port */ 1391ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1392ce8d5614SIntel printf("Fail to start port %d\n", pi); 1393ce8d5614SIntel 1394ce8d5614SIntel /* Fail to start port, set port back to stopped */ 1395ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1396ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1397ce8d5614SIntel printf("Port %d can not be set back to " 1398ce8d5614SIntel "stopped\n", pi); 1399ce8d5614SIntel continue; 1400ce8d5614SIntel } 1401ce8d5614SIntel 1402ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1403ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1404ce8d5614SIntel printf("Port %d can not be
set into started\n", pi); 1405ce8d5614SIntel 14062950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1407d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 14082950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 14092950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 14102950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1411d8c89163SZijie Pan 1412ce8d5614SIntel /* at least one port started, need checking link status */ 1413ce8d5614SIntel need_check_link_status = 1; 1414ce8d5614SIntel } 1415ce8d5614SIntel 141692d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1417edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 141892d2703eSMichael Qiu else if (need_check_link_status == 0) 1419ce8d5614SIntel printf("Please stop the ports first\n"); 1420ce8d5614SIntel 1421ce8d5614SIntel printf("Done\n"); 1422148f963fSBruce Richardson return 0; 1423ce8d5614SIntel } 1424ce8d5614SIntel 1425ce8d5614SIntel void 1426ce8d5614SIntel stop_port(portid_t pid) 1427ce8d5614SIntel { 1428ce8d5614SIntel portid_t pi; 1429ce8d5614SIntel struct rte_port *port; 1430ce8d5614SIntel int need_check_link_status = 0; 1431ce8d5614SIntel 1432ce8d5614SIntel if (test_done == 0) { 1433ce8d5614SIntel printf("Please stop forwarding first\n"); 1434ce8d5614SIntel return; 1435ce8d5614SIntel } 1436ce8d5614SIntel if (dcb_test) { 1437ce8d5614SIntel dcb_test = 0; 1438ce8d5614SIntel dcb_config = 0; 1439ce8d5614SIntel } 14404468635fSMichael Qiu 14414468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14424468635fSMichael Qiu return; 14434468635fSMichael Qiu 1444ce8d5614SIntel printf("Stopping ports...\n"); 1445ce8d5614SIntel 1446edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 14474468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1448ce8d5614SIntel continue; 1449ce8d5614SIntel 1450ce8d5614SIntel port = &ports[pi]; 1451ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1452ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1453ce8d5614SIntel continue; 1454ce8d5614SIntel 1455ce8d5614SIntel rte_eth_dev_stop(pi); 1456ce8d5614SIntel 1457ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1458ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1459ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1460ce8d5614SIntel need_check_link_status = 1; 1461ce8d5614SIntel } 1462bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1463edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1464ce8d5614SIntel 1465ce8d5614SIntel printf("Done\n"); 1466ce8d5614SIntel } 1467ce8d5614SIntel 1468ce8d5614SIntel void 1469ce8d5614SIntel close_port(portid_t pid) 1470ce8d5614SIntel { 1471ce8d5614SIntel portid_t pi; 1472ce8d5614SIntel struct rte_port *port; 1473ce8d5614SIntel 1474ce8d5614SIntel if (test_done == 0) { 1475ce8d5614SIntel printf("Please stop forwarding first\n"); 1476ce8d5614SIntel return; 1477ce8d5614SIntel } 1478ce8d5614SIntel 14794468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14804468635fSMichael Qiu return; 14814468635fSMichael Qiu 1482ce8d5614SIntel printf("Closing ports...\n"); 1483ce8d5614SIntel 1484edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 14854468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1486ce8d5614SIntel continue; 1487ce8d5614SIntel 1488ce8d5614SIntel port = &ports[pi]; 1489ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1490d4e8ad64SMichael Qiu 
RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1491d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1492d4e8ad64SMichael Qiu continue; 1493d4e8ad64SMichael Qiu } 1494d4e8ad64SMichael Qiu 1495d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1496ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1497ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1498ce8d5614SIntel continue; 1499ce8d5614SIntel } 1500ce8d5614SIntel 1501ce8d5614SIntel rte_eth_dev_close(pi); 1502ce8d5614SIntel 1503ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1504ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1505ce8d5614SIntel printf("Port %d can not be set into closed\n", pi); 1506ce8d5614SIntel } 1507ce8d5614SIntel 1508ce8d5614SIntel printf("Done\n"); 1509ce8d5614SIntel } 1510ce8d5614SIntel 1511edab33b1STetsuya Mukawa void 1512edab33b1STetsuya Mukawa attach_port(char *identifier) 1513ce8d5614SIntel { 1514edab33b1STetsuya Mukawa portid_t i, j, pi = 0; 1515ce8d5614SIntel 1516edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1517edab33b1STetsuya Mukawa 1518edab33b1STetsuya Mukawa if (identifier == NULL) { 1519edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1520edab33b1STetsuya Mukawa return; 1521ce8d5614SIntel } 1522ce8d5614SIntel 1523edab33b1STetsuya Mukawa if (test_done == 0) { 1524edab33b1STetsuya Mukawa printf("Please stop forwarding first\n"); 1525edab33b1STetsuya Mukawa return; 1526ce8d5614SIntel } 1527ce8d5614SIntel 1528edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1529edab33b1STetsuya Mukawa return; 1530edab33b1STetsuya Mukawa 1531edab33b1STetsuya Mukawa ports[pi].enabled = 1; 1532edab33b1STetsuya Mukawa reconfig(pi, rte_eth_dev_socket_id(pi)); 1533edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1534edab33b1STetsuya Mukawa 1535edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1536edab33b1STetsuya Mukawa 1537edab33b1STetsuya Mukawa /* set_default_fwd_ports_config(); */ 15386c76533cSStephen Hemminger memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids)); 1539edab33b1STetsuya Mukawa i = 0; 1540edab33b1STetsuya Mukawa FOREACH_PORT(j, ports) { 1541edab33b1STetsuya Mukawa fwd_ports_ids[i] = j; 1542edab33b1STetsuya Mukawa i++; 1543edab33b1STetsuya Mukawa } 1544edab33b1STetsuya Mukawa nb_cfg_ports = nb_ports; 1545edab33b1STetsuya Mukawa nb_fwd_ports++; 1546edab33b1STetsuya Mukawa 1547edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1548edab33b1STetsuya Mukawa 1549edab33b1STetsuya Mukawa printf("Port %d is attached.
Now total ports is %d\n", pi, nb_ports); 1550edab33b1STetsuya Mukawa printf("Done\n"); 1551edab33b1STetsuya Mukawa } 1552edab33b1STetsuya Mukawa 1553edab33b1STetsuya Mukawa void 1554edab33b1STetsuya Mukawa detach_port(uint8_t port_id) 15555f4ec54fSChen Jing D(Mark) { 1556edab33b1STetsuya Mukawa portid_t i, pi = 0; 1557edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 15585f4ec54fSChen Jing D(Mark) 1559edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 15605f4ec54fSChen Jing D(Mark) 1561edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1562edab33b1STetsuya Mukawa printf("Please close port first\n"); 1563edab33b1STetsuya Mukawa return; 1564edab33b1STetsuya Mukawa } 1565edab33b1STetsuya Mukawa 1566edab33b1STetsuya Mukawa if (rte_eth_dev_detach(port_id, name)) 1567edab33b1STetsuya Mukawa return; 1568edab33b1STetsuya Mukawa 1569edab33b1STetsuya Mukawa ports[port_id].enabled = 0; 1570edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1571edab33b1STetsuya Mukawa 1572edab33b1STetsuya Mukawa /* set_default_fwd_ports_config(); */ 15736c76533cSStephen Hemminger memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids)); 1574edab33b1STetsuya Mukawa i = 0; 1575edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 1576edab33b1STetsuya Mukawa fwd_ports_ids[i] = pi; 1577edab33b1STetsuya Mukawa i++; 1578edab33b1STetsuya Mukawa } 1579edab33b1STetsuya Mukawa nb_cfg_ports = nb_ports; 1580edab33b1STetsuya Mukawa nb_fwd_ports--; 1581edab33b1STetsuya Mukawa 1582edab33b1STetsuya Mukawa printf("Port '%s' is detached. Now total ports is %d\n", 1583edab33b1STetsuya Mukawa name, nb_ports); 1584edab33b1STetsuya Mukawa printf("Done\n"); 1585edab33b1STetsuya Mukawa return; 15865f4ec54fSChen Jing D(Mark) } 15875f4ec54fSChen Jing D(Mark) 1588af75078fSIntel void 1589af75078fSIntel pmd_test_exit(void) 1590af75078fSIntel { 1591af75078fSIntel portid_t pt_id; 1592af75078fSIntel 15938210ec25SPablo de Lara if (test_done == 0) 15948210ec25SPablo de Lara stop_packet_forwarding(); 15958210ec25SPablo de Lara 1596d3a274ceSZhihong Wang if (ports != NULL) { 1597d3a274ceSZhihong Wang no_link_check = 1; 1598edab33b1STetsuya Mukawa FOREACH_PORT(pt_id, ports) { 1599d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1600af75078fSIntel fflush(stdout); 1601d3a274ceSZhihong Wang stop_port(pt_id); 1602d3a274ceSZhihong Wang close_port(pt_id); 1603af75078fSIntel } 1604d3a274ceSZhihong Wang } 1605d3a274ceSZhihong Wang printf("\nBye...\n"); 1606af75078fSIntel } 1607af75078fSIntel 1608af75078fSIntel typedef void (*cmd_func_t)(void); 1609af75078fSIntel struct pmd_test_command { 1610af75078fSIntel const char *cmd_name; 1611af75078fSIntel cmd_func_t cmd_func; 1612af75078fSIntel }; 1613af75078fSIntel 1614af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1615af75078fSIntel 1616ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1617af75078fSIntel static void 1618edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 1619af75078fSIntel { 1620ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1621ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1622ce8d5614SIntel uint8_t portid, count, all_ports_up, print_flag = 0; 1623ce8d5614SIntel struct rte_eth_link link; 1624ce8d5614SIntel 1625ce8d5614SIntel printf("Checking link statuses...\n"); 1626ce8d5614SIntel fflush(stdout); 1627ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1628ce8d5614SIntel all_ports_up = 1; 1629edab33b1STetsuya Mukawa 
FOREACH_PORT(portid, ports) { 1630ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1631ce8d5614SIntel continue; 1632ce8d5614SIntel memset(&link, 0, sizeof(link)); 1633ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1634ce8d5614SIntel /* print link status if flag set */ 1635ce8d5614SIntel if (print_flag == 1) { 1636ce8d5614SIntel if (link.link_status) 1637ce8d5614SIntel printf("Port %d Link Up - speed %u " 1638ce8d5614SIntel "Mbps - %s\n", (uint8_t)portid, 1639ce8d5614SIntel (unsigned)link.link_speed, 1640ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1641ce8d5614SIntel ("full-duplex") : ("half-duplex")); 1642ce8d5614SIntel else 1643ce8d5614SIntel printf("Port %d Link Down\n", 1644ce8d5614SIntel (uint8_t)portid); 1645ce8d5614SIntel continue; 1646ce8d5614SIntel } 1647ce8d5614SIntel /* clear all_ports_up flag if any link down */ 164809419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 1649ce8d5614SIntel all_ports_up = 0; 1650ce8d5614SIntel break; 1651ce8d5614SIntel } 1652ce8d5614SIntel } 1653ce8d5614SIntel /* after finally printing all link status, get out */ 1654ce8d5614SIntel if (print_flag == 1) 1655ce8d5614SIntel break; 1656ce8d5614SIntel 1657ce8d5614SIntel if (all_ports_up == 0) { 1658ce8d5614SIntel fflush(stdout); 1659ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1660ce8d5614SIntel } 1661ce8d5614SIntel 1662ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1663ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1664ce8d5614SIntel print_flag = 1; 1665ce8d5614SIntel } 1666ce8d5614SIntel } 1667af75078fSIntel } 1668af75078fSIntel 1669013af9b6SIntel static int 1670013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1671af75078fSIntel { 1672013af9b6SIntel uint16_t i; 1673af75078fSIntel int diag; 1674013af9b6SIntel uint8_t mapping_found = 0; 1675af75078fSIntel 1676013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1677013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1678013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1679013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1680013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1681013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1682013af9b6SIntel if (diag != 0) 1683013af9b6SIntel return diag; 1684013af9b6SIntel mapping_found = 1; 1685af75078fSIntel } 1686013af9b6SIntel } 1687013af9b6SIntel if (mapping_found) 1688013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1689013af9b6SIntel return 0; 1690013af9b6SIntel } 1691013af9b6SIntel 1692013af9b6SIntel static int 1693013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1694013af9b6SIntel { 1695013af9b6SIntel uint16_t i; 1696013af9b6SIntel int diag; 1697013af9b6SIntel uint8_t mapping_found = 0; 1698013af9b6SIntel 1699013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1700013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1701013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1702013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1703013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1704013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1705013af9b6SIntel if (diag != 0) 1706013af9b6SIntel return diag; 1707013af9b6SIntel mapping_found = 1; 1708013af9b6SIntel } 1709013af9b6SIntel } 1710013af9b6SIntel if (mapping_found) 1711013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1;
1712013af9b6SIntel return 0; 1713013af9b6SIntel } 1714013af9b6SIntel 1715013af9b6SIntel static void 1716013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1717013af9b6SIntel { 1718013af9b6SIntel int diag = 0; 1719013af9b6SIntel 1720013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1721af75078fSIntel if (diag != 0) { 1722013af9b6SIntel if (diag == -ENOTSUP) { 1723013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 1724013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 1725013af9b6SIntel } 1726013af9b6SIntel else 1727013af9b6SIntel rte_exit(EXIT_FAILURE, 1728013af9b6SIntel "set_tx_queue_stats_mapping_registers " 1729013af9b6SIntel "failed for port id=%d diag=%d\n", 1730af75078fSIntel pi, diag); 1731af75078fSIntel } 1732013af9b6SIntel 1733013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 1734af75078fSIntel if (diag != 0) { 1735013af9b6SIntel if (diag == -ENOTSUP) { 1736013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 1737013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 1738013af9b6SIntel } 1739013af9b6SIntel else 1740013af9b6SIntel rte_exit(EXIT_FAILURE, 1741013af9b6SIntel "set_rx_queue_stats_mapping_registers " 1742013af9b6SIntel "failed for port id=%d diag=%d\n", 1743af75078fSIntel pi, diag); 1744af75078fSIntel } 1745af75078fSIntel } 1746af75078fSIntel 1747f2c5125aSPablo de Lara static void 1748f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 1749f2c5125aSPablo de Lara { 1750f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 1751f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 1752f2c5125aSPablo de Lara 1753f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 1754f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1755f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1756f2c5125aSPablo de Lara 1757f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1758f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1759f2c5125aSPablo de Lara 1760f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1761f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1762f2c5125aSPablo de Lara 1763f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1764f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 1765f2c5125aSPablo de Lara 1766f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1767f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 1768f2c5125aSPablo de Lara 1769f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1770f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1771f2c5125aSPablo de Lara 1772f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 1773f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1774f2c5125aSPablo de Lara 1775f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1776f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1777f2c5125aSPablo de Lara 1778f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1779f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1780f2c5125aSPablo de Lara 1781f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1782f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 1783f2c5125aSPablo de Lara 1784f2c5125aSPablo de Lara if (txq_flags != RTE_PMD_PARAM_UNSET) 
1785f2c5125aSPablo de Lara port->tx_conf.txq_flags = txq_flags; 1786f2c5125aSPablo de Lara } 1787f2c5125aSPablo de Lara 1788013af9b6SIntel void 1789013af9b6SIntel init_port_config(void) 1790013af9b6SIntel { 1791013af9b6SIntel portid_t pid; 1792013af9b6SIntel struct rte_port *port; 1793013af9b6SIntel 1794edab33b1STetsuya Mukawa FOREACH_PORT(pid, ports) { 1795013af9b6SIntel port = &ports[pid]; 1796013af9b6SIntel port->dev_conf.rxmode = rx_mode; 1797013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 17983ce690d3SBruce Richardson if (nb_rxq > 1) { 1799013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1800013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1801af75078fSIntel } else { 1802013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1803013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1804af75078fSIntel } 18053ce690d3SBruce Richardson 18063ce690d3SBruce Richardson if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 18073ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 18083ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 18093ce690d3SBruce Richardson else 18103ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 18113ce690d3SBruce Richardson } 18123ce690d3SBruce Richardson 1813a30979f6SOuyang Changchun if (port->dev_info.max_vfs != 0) { 1814a30979f6SOuyang Changchun if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1815a30979f6SOuyang Changchun port->dev_conf.rxmode.mq_mode = 1816a30979f6SOuyang Changchun ETH_MQ_RX_VMDQ_RSS; 1817a30979f6SOuyang Changchun else 1818a30979f6SOuyang Changchun port->dev_conf.rxmode.mq_mode = 1819a30979f6SOuyang Changchun ETH_MQ_RX_NONE; 1820a30979f6SOuyang Changchun 1821a30979f6SOuyang Changchun port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE; 1822a30979f6SOuyang Changchun } 1823a30979f6SOuyang Changchun 1824f2c5125aSPablo de Lara rxtx_port_config(port); 1825013af9b6SIntel 1826013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 1827013af9b6SIntel 1828013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 18297b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 18307b7e5ba7SIntel rte_eth_dev_bypass_init(pid); 18317b7e5ba7SIntel #endif 1832013af9b6SIntel } 1833013af9b6SIntel } 1834013af9b6SIntel 183541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 183641b05095SBernard Iremonger { 183741b05095SBernard Iremonger struct rte_port *port; 183841b05095SBernard Iremonger 183941b05095SBernard Iremonger port = &ports[slave_pid]; 184041b05095SBernard Iremonger port->slave_flag = 1; 184141b05095SBernard Iremonger } 184241b05095SBernard Iremonger 184341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 184441b05095SBernard Iremonger { 184541b05095SBernard Iremonger struct rte_port *port; 184641b05095SBernard Iremonger 184741b05095SBernard Iremonger port = &ports[slave_pid]; 184841b05095SBernard Iremonger port->slave_flag = 0; 184941b05095SBernard Iremonger } 185041b05095SBernard Iremonger 1851013af9b6SIntel const uint16_t vlan_tags[] = { 1852013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1853013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1854013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1855013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 1856013af9b6SIntel }; 1857013af9b6SIntel 1858013af9b6SIntel static int 18591a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 18601a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 18611a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 
18621a572499SJingjing Wu uint8_t pfc_en) 1863013af9b6SIntel { 1864013af9b6SIntel uint8_t i; 1865af75078fSIntel 1866af75078fSIntel /* 1867013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 1868013af9b6SIntel * given above, and the number of traffic classes available for use. 1869af75078fSIntel */ 18701a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 18711a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 18721a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 18731a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 18741a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1875013af9b6SIntel 1876013af9b6SIntel /* VMDQ+DCB RX and TX configrations */ 18771a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 18781a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 18791a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 18801a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 18811a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 18821a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1883013af9b6SIntel 18841a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 18851a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 18861a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 18871a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 18881a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 1889af75078fSIntel } 1890013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 18911a572499SJingjing Wu vmdq_rx_conf->dcb_tc[i] = i; 18921a572499SJingjing Wu vmdq_tx_conf->dcb_tc[i] = i; 1893013af9b6SIntel } 1894013af9b6SIntel 1895013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 189632e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 189732e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 18981a572499SJingjing Wu } else { 18991a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 19001a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 19011a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 19021a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 1903013af9b6SIntel 19041a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 19051a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 19061a572499SJingjing Wu 19071a572499SJingjing Wu for (i = 0; i < num_tcs; i++) { 19081a572499SJingjing Wu rx_conf->dcb_tc[i] = i; 19091a572499SJingjing Wu tx_conf->dcb_tc[i] = i; 1910013af9b6SIntel } 19111a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 19121a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 191332e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 19141a572499SJingjing Wu } 19151a572499SJingjing Wu 19161a572499SJingjing Wu if (pfc_en) 19171a572499SJingjing Wu eth_conf->dcb_capability_en = 19181a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 1919013af9b6SIntel else 1920013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1921013af9b6SIntel 1922013af9b6SIntel return 0; 1923013af9b6SIntel } 1924013af9b6SIntel 1925013af9b6SIntel int 19261a572499SJingjing Wu init_port_dcb_config(portid_t pid, 19271a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 19281a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 19291a572499SJingjing Wu uint8_t pfc_en) 1930013af9b6SIntel { 1931013af9b6SIntel struct rte_eth_conf port_conf; 19321a572499SJingjing Wu struct rte_eth_dev_info dev_info; 1933013af9b6SIntel struct rte_port 
*rte_port; 1934013af9b6SIntel int retval; 1935013af9b6SIntel uint16_t i; 1936013af9b6SIntel 19371a572499SJingjing Wu rte_eth_dev_info_get(pid, &dev_info); 19381a572499SJingjing Wu 19391a572499SJingjing Wu /* If dev_info.vmdq_pool_base is greater than 0, 19401a572499SJingjing Wu * the queue id of vmdq pools is started after pf queues. 19411a572499SJingjing Wu */ 19421a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) { 19431a572499SJingjing Wu printf("VMDQ_DCB multi-queue mode is nonsensical" 19441a572499SJingjing Wu " for port %d.", pid); 19451a572499SJingjing Wu return -1; 19461a572499SJingjing Wu } 19471a572499SJingjing Wu 19481a572499SJingjing Wu /* Assume the ports in testpmd have the same dcb capability 19491a572499SJingjing Wu * and has the same number of rxq and txq in dcb mode 19501a572499SJingjing Wu */ 19511a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 19521a572499SJingjing Wu nb_rxq = dev_info.max_rx_queues; 19531a572499SJingjing Wu nb_txq = dev_info.max_tx_queues; 19541a572499SJingjing Wu } else { 19551a572499SJingjing Wu /*if vt is disabled, use all pf queues */ 19561a572499SJingjing Wu if (dev_info.vmdq_pool_base == 0) { 19571a572499SJingjing Wu nb_rxq = dev_info.max_rx_queues; 19581a572499SJingjing Wu nb_txq = dev_info.max_tx_queues; 19591a572499SJingjing Wu } else { 19601a572499SJingjing Wu nb_rxq = (queueid_t)num_tcs; 19611a572499SJingjing Wu nb_txq = (queueid_t)num_tcs; 19621a572499SJingjing Wu 19631a572499SJingjing Wu } 19641a572499SJingjing Wu } 1965013af9b6SIntel rx_free_thresh = 64; 1966013af9b6SIntel 1967013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 1968013af9b6SIntel /* Enter DCB configuration status */ 1969013af9b6SIntel dcb_config = 1; 1970013af9b6SIntel 1971013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 19721a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 1973013af9b6SIntel if (retval < 0) 1974013af9b6SIntel return retval; 1975013af9b6SIntel 1976013af9b6SIntel rte_port = &ports[pid]; 1977013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 1978013af9b6SIntel 1979f2c5125aSPablo de Lara rxtx_port_config(rte_port); 1980013af9b6SIntel /* VLAN filter */ 1981013af9b6SIntel rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 19821a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 1983013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 1984013af9b6SIntel 1985013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1986013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 1987013af9b6SIntel 19887741e4cfSIntel rte_port->dcb_flag = 1; 19897741e4cfSIntel 1990013af9b6SIntel return 0; 1991af75078fSIntel } 1992af75078fSIntel 1993ffc468ffSTetsuya Mukawa static void 1994ffc468ffSTetsuya Mukawa init_port(void) 1995ffc468ffSTetsuya Mukawa { 1996ffc468ffSTetsuya Mukawa portid_t pid; 1997ffc468ffSTetsuya Mukawa 1998ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
*/ 1999ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 2000ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2001ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 2002ffc468ffSTetsuya Mukawa if (ports == NULL) { 2003ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2004ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2005ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2006ffc468ffSTetsuya Mukawa } 2007ffc468ffSTetsuya Mukawa 2008ffc468ffSTetsuya Mukawa /* enabled allocated ports */ 2009ffc468ffSTetsuya Mukawa for (pid = 0; pid < nb_ports; pid++) 2010ffc468ffSTetsuya Mukawa ports[pid].enabled = 1; 2011ffc468ffSTetsuya Mukawa } 2012ffc468ffSTetsuya Mukawa 2013d3a274ceSZhihong Wang static void 2014d3a274ceSZhihong Wang force_quit(void) 2015d3a274ceSZhihong Wang { 2016d3a274ceSZhihong Wang pmd_test_exit(); 2017d3a274ceSZhihong Wang prompt_exit(); 2018d3a274ceSZhihong Wang } 2019d3a274ceSZhihong Wang 2020d3a274ceSZhihong Wang static void 2021d3a274ceSZhihong Wang signal_handler(int signum) 2022d3a274ceSZhihong Wang { 2023d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 2024d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 2025d3a274ceSZhihong Wang signum); 2026d3a274ceSZhihong Wang force_quit(); 2027d3a274ceSZhihong Wang /* exit with the expected status */ 2028d3a274ceSZhihong Wang signal(signum, SIG_DFL); 2029d3a274ceSZhihong Wang kill(getpid(), signum); 2030d3a274ceSZhihong Wang } 2031d3a274ceSZhihong Wang } 2032d3a274ceSZhihong Wang 2033af75078fSIntel int 2034af75078fSIntel main(int argc, char** argv) 2035af75078fSIntel { 2036af75078fSIntel int diag; 2037013af9b6SIntel uint8_t port_id; 2038af75078fSIntel 2039d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 2040d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 2041d3a274ceSZhihong Wang 2042af75078fSIntel diag = rte_eal_init(argc, argv); 2043af75078fSIntel if (diag < 0) 2044af75078fSIntel rte_panic("Cannot init EAL\n"); 2045af75078fSIntel 2046af75078fSIntel nb_ports = (portid_t) rte_eth_dev_count(); 2047af75078fSIntel if (nb_ports == 0) 2048edab33b1STetsuya Mukawa RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2049af75078fSIntel 2050ffc468ffSTetsuya Mukawa /* allocate port structures, and init them */ 2051ffc468ffSTetsuya Mukawa init_port(); 2052ffc468ffSTetsuya Mukawa 2053af75078fSIntel set_def_fwd_config(); 2054af75078fSIntel if (nb_lcores == 0) 2055af75078fSIntel rte_panic("Empty set of forwarding logical cores - check the " 2056af75078fSIntel "core mask supplied in the command parameters\n"); 2057af75078fSIntel 2058af75078fSIntel argc -= diag; 2059af75078fSIntel argv += diag; 2060af75078fSIntel if (argc > 1) 2061af75078fSIntel launch_args_parse(argc, argv); 2062af75078fSIntel 20635a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 20645a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 20655a8fb55cSReshma Pattan 20665a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 2067af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 2068af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 2069af75078fSIntel nb_rxq, nb_txq); 2070af75078fSIntel 2071af75078fSIntel init_config(); 2072148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 2073148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2074af75078fSIntel 2075ce8d5614SIntel /* set all ports to promiscuous mode by default */ 2076edab33b1STetsuya Mukawa FOREACH_PORT(port_id, ports) 2077ce8d5614SIntel 
rte_eth_promiscuous_enable(port_id); 2078af75078fSIntel 20790d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 2080ca7feb22SCyril Chemparathy if (interactive == 1) { 2081ca7feb22SCyril Chemparathy if (auto_start) { 2082ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 2083ca7feb22SCyril Chemparathy start_packet_forwarding(0); 2084ca7feb22SCyril Chemparathy } 2085af75078fSIntel prompt(); 2086ca7feb22SCyril Chemparathy } else 20870d56cb81SThomas Monjalon #endif 20880d56cb81SThomas Monjalon { 2089af75078fSIntel char c; 2090af75078fSIntel int rc; 2091af75078fSIntel 2092af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 2093af75078fSIntel start_packet_forwarding(0); 2094af75078fSIntel printf("Press enter to exit\n"); 2095af75078fSIntel rc = read(0, &c, 1); 2096d3a274ceSZhihong Wang pmd_test_exit(); 2097af75078fSIntel if (rc < 0) 2098af75078fSIntel return 1; 2099af75078fSIntel } 2100af75078fSIntel 2101af75078fSIntel return 0; 2102af75078fSIntel } 2103
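/*
 * Illustrative sketch only, not part of the original testpmd sources:
 * throughout start_port(), stop_port() and close_port() above, the
 * port->port_status field is moved between RTE_PORT_STOPPED,
 * RTE_PORT_HANDLING, RTE_PORT_STARTED and RTE_PORT_CLOSED with
 * rte_atomic16_cmpset(), so that two commands can never handle the same
 * port at the same time. A hypothetical helper following that same
 * pattern (the name port_transition is invented here, it returns 1 on a
 * successful transition and 0 if the port was not in the expected state)
 * could look like:
 *
 *	static int
 *	port_transition(struct rte_port *port, uint16_t from, uint16_t to)
 *	{
 *		return rte_atomic16_cmpset(&(port->port_status), from, to);
 *	}
 *
 * For example, start_port() would call
 * port_transition(port, RTE_PORT_STOPPED, RTE_PORT_HANDLING) before
 * reconfiguring a port, and
 * port_transition(port, RTE_PORT_HANDLING, RTE_PORT_STARTED) once
 * rte_eth_dev_start() has succeeded.
 */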