/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
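
/*
 * Example, for clarity: with the defaults above, the "txonly" engine emits
 * bursts of DEF_PKT_BURST single-segment packets of TXONLY_DEF_PACKET_LEN
 * bytes each. The interactive command "set txpkts 64,64" would instead
 * select two 64-byte segments per packet (tx_pkt_nb_segs = 2 and
 * tx_pkt_length = 128).
 */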

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
return 0; 1274edab33b1STetsuya Mukawa 1275edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1276edab33b1STetsuya Mukawa return 0; 1277edab33b1STetsuya Mukawa 1278edab33b1STetsuya Mukawa return 1; 1279edab33b1STetsuya Mukawa } 1280edab33b1STetsuya Mukawa 1281edab33b1STetsuya Mukawa int 1282ce8d5614SIntel start_port(portid_t pid) 1283ce8d5614SIntel { 128492d2703eSMichael Qiu int diag, need_check_link_status = -1; 1285ce8d5614SIntel portid_t pi; 1286ce8d5614SIntel queueid_t qi; 1287ce8d5614SIntel struct rte_port *port; 12882950a769SDeclan Doherty struct ether_addr mac_addr; 1289ce8d5614SIntel 12904468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 12914468635fSMichael Qiu return 0; 12924468635fSMichael Qiu 1293ce8d5614SIntel if(dcb_config) 1294ce8d5614SIntel dcb_test = 1; 1295edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 1296edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1297ce8d5614SIntel continue; 1298ce8d5614SIntel 129992d2703eSMichael Qiu need_check_link_status = 0; 1300ce8d5614SIntel port = &ports[pi]; 1301ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1302ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1303ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1304ce8d5614SIntel continue; 1305ce8d5614SIntel } 1306ce8d5614SIntel 1307ce8d5614SIntel if (port->need_reconfig > 0) { 1308ce8d5614SIntel port->need_reconfig = 0; 1309ce8d5614SIntel 13105706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 131120a0286fSLiu Xiaofeng port->socket_id); 1312ce8d5614SIntel /* configure port */ 1313ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1314ce8d5614SIntel &(port->dev_conf)); 1315ce8d5614SIntel if (diag != 0) { 1316ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1317ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1318ce8d5614SIntel printf("Port %d can not be set back " 1319ce8d5614SIntel "to stopped\n", pi); 1320ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1321ce8d5614SIntel /* try to reconfigure port next time */ 1322ce8d5614SIntel port->need_reconfig = 1; 1323148f963fSBruce Richardson return -1; 1324ce8d5614SIntel } 1325ce8d5614SIntel } 1326ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1327ce8d5614SIntel port->need_reconfig_queues = 0; 1328ce8d5614SIntel /* setup tx queues */ 1329ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1330b6ea6408SIntel if ((numa_support) && 1331b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1332b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1333b6ea6408SIntel nb_txd,txring_numa[pi], 1334b6ea6408SIntel &(port->tx_conf)); 1335b6ea6408SIntel else 1336b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1337b6ea6408SIntel nb_txd,port->socket_id, 1338b6ea6408SIntel &(port->tx_conf)); 1339b6ea6408SIntel 1340ce8d5614SIntel if (diag == 0) 1341ce8d5614SIntel continue; 1342ce8d5614SIntel 1343ce8d5614SIntel /* Fail to setup tx queue, return */ 1344ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1345ce8d5614SIntel RTE_PORT_HANDLING, 1346ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1347ce8d5614SIntel printf("Port %d can not be set back " 1348ce8d5614SIntel "to stopped\n", pi); 1349ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1350ce8d5614SIntel /* try to reconfigure queues next time */ 1351ce8d5614SIntel port->need_reconfig_queues = 1; 1352148f963fSBruce Richardson return -1; 1353ce8d5614SIntel } 1354ce8d5614SIntel /* setup rx queues */ 1355ce8d5614SIntel for (qi = 0; qi 
< nb_rxq; qi++) { 1356b6ea6408SIntel if ((numa_support) && 1357b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1358b6ea6408SIntel struct rte_mempool * mp = 1359b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1360b6ea6408SIntel if (mp == NULL) { 1361b6ea6408SIntel printf("Failed to setup RX queue: " 1362b6ea6408SIntel "No mempool allocation " 1363b6ea6408SIntel "on the socket %d\n", 1364b6ea6408SIntel rxring_numa[pi]); 1365148f963fSBruce Richardson return -1; 1366b6ea6408SIntel } 1367b6ea6408SIntel 1368b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1369b6ea6408SIntel nb_rxd,rxring_numa[pi], 1370b6ea6408SIntel &(port->rx_conf),mp); 1371b6ea6408SIntel } 1372b6ea6408SIntel else 1373b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1374b6ea6408SIntel nb_rxd,port->socket_id, 1375b6ea6408SIntel &(port->rx_conf), 1376ce8d5614SIntel mbuf_pool_find(port->socket_id)); 1377b6ea6408SIntel 1378ce8d5614SIntel if (diag == 0) 1379ce8d5614SIntel continue; 1380ce8d5614SIntel 1381b6ea6408SIntel 1382ce8d5614SIntel /* Fail to setup rx queue, return */ 1383ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1384ce8d5614SIntel RTE_PORT_HANDLING, 1385ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1386ce8d5614SIntel printf("Port %d can not be set back " 1387ce8d5614SIntel "to stopped\n", pi); 1388ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1389ce8d5614SIntel /* try to reconfigure queues next time */ 1390ce8d5614SIntel port->need_reconfig_queues = 1; 1391148f963fSBruce Richardson return -1; 1392ce8d5614SIntel } 1393ce8d5614SIntel } 1394ce8d5614SIntel /* start port */ 1395ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1396ce8d5614SIntel printf("Fail to start port %d\n", pi); 1397ce8d5614SIntel 1398ce8d5614SIntel /* Failed to start port, set it back to stopped */ 1399ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1400ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1401ce8d5614SIntel printf("Port %d can not be set back to " 1402ce8d5614SIntel "stopped\n", pi); 1403ce8d5614SIntel continue; 1404ce8d5614SIntel } 1405ce8d5614SIntel 1406ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1407ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1408ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1409ce8d5614SIntel 14102950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1411d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 14122950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 14132950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 14142950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1415d8c89163SZijie Pan 1416ce8d5614SIntel /* at least one port started, need checking link status */ 1417ce8d5614SIntel need_check_link_status = 1; 1418ce8d5614SIntel } 1419ce8d5614SIntel 142092d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1421edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 142292d2703eSMichael Qiu else if (need_check_link_status == 0) 1423ce8d5614SIntel printf("Please stop the ports first\n"); 1424ce8d5614SIntel 1425ce8d5614SIntel printf("Done\n"); 1426148f963fSBruce Richardson return 0; 1427ce8d5614SIntel } 1428ce8d5614SIntel 1429ce8d5614SIntel void 1430ce8d5614SIntel stop_port(portid_t pid) 1431ce8d5614SIntel { 1432ce8d5614SIntel portid_t pi; 1433ce8d5614SIntel struct rte_port *port; 1434ce8d5614SIntel int need_check_link_status = 0; 1435ce8d5614SIntel 1436ce8d5614SIntel if (dcb_test) { 1437ce8d5614SIntel
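/*
 * The start_port() path above follows the usual ethdev bring-up order:
 * rte_eth_dev_configure(), then per-queue rte_eth_tx_queue_setup() and
 * rte_eth_rx_queue_setup(), then rte_eth_dev_start(). A minimal sketch of
 * that order for one port with a single queue pair (illustrative only,
 * reusing the globals and helpers defined in this file):
 *
 *     if (rte_eth_dev_configure(pi, 1, 1, &port->dev_conf) == 0 &&
 *         rte_eth_tx_queue_setup(pi, 0, nb_txd, port->socket_id,
 *                                &port->tx_conf) == 0 &&
 *         rte_eth_rx_queue_setup(pi, 0, nb_rxd, port->socket_id,
 *                                &port->rx_conf,
 *                                mbuf_pool_find(port->socket_id)) == 0)
 *         (void)rte_eth_dev_start(pi);
 *
 * When start_port() entered DCB test mode, leave it again before stopping
 * the ports.
 */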
dcb_test = 0; 1438ce8d5614SIntel dcb_config = 0; 1439ce8d5614SIntel } 14404468635fSMichael Qiu 14414468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14424468635fSMichael Qiu return; 14434468635fSMichael Qiu 1444ce8d5614SIntel printf("Stopping ports...\n"); 1445ce8d5614SIntel 1446edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 14474468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1448ce8d5614SIntel continue; 1449ce8d5614SIntel 1450a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1451a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1452a8ef3e3aSBernard Iremonger continue; 1453a8ef3e3aSBernard Iremonger } 1454a8ef3e3aSBernard Iremonger 14550e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 14560e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 14570e545d30SBernard Iremonger continue; 14580e545d30SBernard Iremonger } 14590e545d30SBernard Iremonger 1460ce8d5614SIntel port = &ports[pi]; 1461ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1462ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1463ce8d5614SIntel continue; 1464ce8d5614SIntel 1465ce8d5614SIntel rte_eth_dev_stop(pi); 1466ce8d5614SIntel 1467ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1468ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1469ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1470ce8d5614SIntel need_check_link_status = 1; 1471ce8d5614SIntel } 1472bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1473edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1474ce8d5614SIntel 1475ce8d5614SIntel printf("Done\n"); 1476ce8d5614SIntel } 1477ce8d5614SIntel 1478ce8d5614SIntel void 1479ce8d5614SIntel close_port(portid_t pid) 1480ce8d5614SIntel { 1481ce8d5614SIntel portid_t pi; 1482ce8d5614SIntel struct rte_port *port; 1483ce8d5614SIntel 14844468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14854468635fSMichael Qiu return; 14864468635fSMichael Qiu 1487ce8d5614SIntel printf("Closing ports...\n"); 1488ce8d5614SIntel 1489edab33b1STetsuya Mukawa FOREACH_PORT(pi, ports) { 14904468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1491ce8d5614SIntel continue; 1492ce8d5614SIntel 1493a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1494a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1495a8ef3e3aSBernard Iremonger continue; 1496a8ef3e3aSBernard Iremonger } 1497a8ef3e3aSBernard Iremonger 14980e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 14990e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 15000e545d30SBernard Iremonger continue; 15010e545d30SBernard Iremonger } 15020e545d30SBernard Iremonger 1503ce8d5614SIntel port = &ports[pi]; 1504ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1505d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1506d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1507d4e8ad64SMichael Qiu continue; 1508d4e8ad64SMichael Qiu } 1509d4e8ad64SMichael Qiu 1510d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1511ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1512ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1513ce8d5614SIntel continue; 1514ce8d5614SIntel } 1515ce8d5614SIntel 1516ce8d5614SIntel rte_eth_dev_close(pi); 1517ce8d5614SIntel 
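/* The ethdev is closed at this point; the cmpset below only updates testpmd's own port_status bookkeeping to RTE_PORT_CLOSED. */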
1518ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1519ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1520b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1521ce8d5614SIntel } 1522ce8d5614SIntel 1523ce8d5614SIntel printf("Done\n"); 1524ce8d5614SIntel } 1525ce8d5614SIntel 1526edab33b1STetsuya Mukawa void 1527edab33b1STetsuya Mukawa attach_port(char *identifier) 1528ce8d5614SIntel { 1529ebf5e9b7SBernard Iremonger portid_t pi = 0; 1530ce8d5614SIntel 1531edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1532edab33b1STetsuya Mukawa 1533edab33b1STetsuya Mukawa if (identifier == NULL) { 1534edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1535edab33b1STetsuya Mukawa return; 1536ce8d5614SIntel } 1537ce8d5614SIntel 1538edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1539edab33b1STetsuya Mukawa return; 1540edab33b1STetsuya Mukawa 1541edab33b1STetsuya Mukawa ports[pi].enabled = 1; 1542edab33b1STetsuya Mukawa reconfig(pi, rte_eth_dev_socket_id(pi)); 1543edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1544edab33b1STetsuya Mukawa 1545edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1546edab33b1STetsuya Mukawa 1547edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1548edab33b1STetsuya Mukawa 1549edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1550edab33b1STetsuya Mukawa printf("Done\n"); 1551edab33b1STetsuya Mukawa } 1552edab33b1STetsuya Mukawa 1553edab33b1STetsuya Mukawa void 1554edab33b1STetsuya Mukawa detach_port(uint8_t port_id) 15555f4ec54fSChen Jing D(Mark) { 1556edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 15575f4ec54fSChen Jing D(Mark) 1558edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 15595f4ec54fSChen Jing D(Mark) 1560edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1561edab33b1STetsuya Mukawa printf("Please close port first\n"); 1562edab33b1STetsuya Mukawa return; 1563edab33b1STetsuya Mukawa } 1564edab33b1STetsuya Mukawa 1565edab33b1STetsuya Mukawa if (rte_eth_dev_detach(port_id, name)) 1566edab33b1STetsuya Mukawa return; 1567edab33b1STetsuya Mukawa 1568edab33b1STetsuya Mukawa ports[port_id].enabled = 0; 1569edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1570edab33b1STetsuya Mukawa 1571edab33b1STetsuya Mukawa printf("Port '%s' is detached. 
Now total ports is %d\n", 1572edab33b1STetsuya Mukawa name, nb_ports); 1573edab33b1STetsuya Mukawa printf("Done\n"); 1574edab33b1STetsuya Mukawa return; 15755f4ec54fSChen Jing D(Mark) } 15765f4ec54fSChen Jing D(Mark) 1577af75078fSIntel void 1578af75078fSIntel pmd_test_exit(void) 1579af75078fSIntel { 1580af75078fSIntel portid_t pt_id; 1581af75078fSIntel 15828210ec25SPablo de Lara if (test_done == 0) 15838210ec25SPablo de Lara stop_packet_forwarding(); 15848210ec25SPablo de Lara 1585d3a274ceSZhihong Wang if (ports != NULL) { 1586d3a274ceSZhihong Wang no_link_check = 1; 1587edab33b1STetsuya Mukawa FOREACH_PORT(pt_id, ports) { 1588d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1589af75078fSIntel fflush(stdout); 1590d3a274ceSZhihong Wang stop_port(pt_id); 1591d3a274ceSZhihong Wang close_port(pt_id); 1592af75078fSIntel } 1593d3a274ceSZhihong Wang } 1594d3a274ceSZhihong Wang printf("\nBye...\n"); 1595af75078fSIntel } 1596af75078fSIntel 1597af75078fSIntel typedef void (*cmd_func_t)(void); 1598af75078fSIntel struct pmd_test_command { 1599af75078fSIntel const char *cmd_name; 1600af75078fSIntel cmd_func_t cmd_func; 1601af75078fSIntel }; 1602af75078fSIntel 1603af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1604af75078fSIntel 1605ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1606af75078fSIntel static void 1607edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 1608af75078fSIntel { 1609ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1610ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1611ce8d5614SIntel uint8_t portid, count, all_ports_up, print_flag = 0; 1612ce8d5614SIntel struct rte_eth_link link; 1613ce8d5614SIntel 1614ce8d5614SIntel printf("Checking link statuses...\n"); 1615ce8d5614SIntel fflush(stdout); 1616ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1617ce8d5614SIntel all_ports_up = 1; 1618edab33b1STetsuya Mukawa FOREACH_PORT(portid, ports) { 1619ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1620ce8d5614SIntel continue; 1621ce8d5614SIntel memset(&link, 0, sizeof(link)); 1622ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1623ce8d5614SIntel /* print link status if flag set */ 1624ce8d5614SIntel if (print_flag == 1) { 1625ce8d5614SIntel if (link.link_status) 1626ce8d5614SIntel printf("Port %d Link Up - speed %u " 1627ce8d5614SIntel "Mbps - %s\n", (uint8_t)portid, 1628ce8d5614SIntel (unsigned)link.link_speed, 1629ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
1630ce8d5614SIntel ("full-duplex") : ("half-duplex")); 1631ce8d5614SIntel else 1632ce8d5614SIntel printf("Port %d Link Down\n", 1633ce8d5614SIntel (uint8_t)portid); 1634ce8d5614SIntel continue; 1635ce8d5614SIntel } 1636ce8d5614SIntel /* clear all_ports_up flag if any link down */ 163709419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 1638ce8d5614SIntel all_ports_up = 0; 1639ce8d5614SIntel break; 1640ce8d5614SIntel } 1641ce8d5614SIntel } 1642ce8d5614SIntel /* after finally printing all link status, get out */ 1643ce8d5614SIntel if (print_flag == 1) 1644ce8d5614SIntel break; 1645ce8d5614SIntel 1646ce8d5614SIntel if (all_ports_up == 0) { 1647ce8d5614SIntel fflush(stdout); 1648ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1649ce8d5614SIntel } 1650ce8d5614SIntel 1651ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1652ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1653ce8d5614SIntel print_flag = 1; 1654ce8d5614SIntel } 1655ce8d5614SIntel } 1656af75078fSIntel } 1657af75078fSIntel 1658013af9b6SIntel static int 1659013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1660af75078fSIntel { 1661013af9b6SIntel uint16_t i; 1662af75078fSIntel int diag; 1663013af9b6SIntel uint8_t mapping_found = 0; 1664af75078fSIntel 1665013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1666013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1667013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1668013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1669013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1670013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1671013af9b6SIntel if (diag != 0) 1672013af9b6SIntel return diag; 1673013af9b6SIntel mapping_found = 1; 1674af75078fSIntel } 1675013af9b6SIntel } 1676013af9b6SIntel if (mapping_found) 1677013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1678013af9b6SIntel return 0; 1679013af9b6SIntel } 1680013af9b6SIntel 1681013af9b6SIntel static int 1682013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1683013af9b6SIntel { 1684013af9b6SIntel uint16_t i; 1685013af9b6SIntel int diag; 1686013af9b6SIntel uint8_t mapping_found = 0; 1687013af9b6SIntel 1688013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1689013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1690013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1691013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1692013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1693013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1694013af9b6SIntel if (diag != 0) 1695013af9b6SIntel return diag; 1696013af9b6SIntel mapping_found = 1; 1697013af9b6SIntel } 1698013af9b6SIntel } 1699013af9b6SIntel if (mapping_found) 1700013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 1701013af9b6SIntel return 0; 1702013af9b6SIntel } 1703013af9b6SIntel 1704013af9b6SIntel static void 1705013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1706013af9b6SIntel { 1707013af9b6SIntel int diag = 0; 1708013af9b6SIntel 1709013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1710af75078fSIntel if (diag != 0) { 1711013af9b6SIntel if (diag == -ENOTSUP) { 1712013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 1713013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 1714013af9b6SIntel }
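/* -ENOTSUP only disables the mapping for this port; any other error is fatal. */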
1715013af9b6SIntel else 1716013af9b6SIntel rte_exit(EXIT_FAILURE, 1717013af9b6SIntel "set_tx_queue_stats_mapping_registers " 1718013af9b6SIntel "failed for port id=%d diag=%d\n", 1719af75078fSIntel pi, diag); 1720af75078fSIntel } 1721013af9b6SIntel 1722013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 1723af75078fSIntel if (diag != 0) { 1724013af9b6SIntel if (diag == -ENOTSUP) { 1725013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 1726013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 1727013af9b6SIntel } 1728013af9b6SIntel else 1729013af9b6SIntel rte_exit(EXIT_FAILURE, 1730013af9b6SIntel "set_rx_queue_stats_mapping_registers " 1731013af9b6SIntel "failed for port id=%d diag=%d\n", 1732af75078fSIntel pi, diag); 1733af75078fSIntel } 1734af75078fSIntel } 1735af75078fSIntel 1736f2c5125aSPablo de Lara static void 1737f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 1738f2c5125aSPablo de Lara { 1739f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 1740f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 1741f2c5125aSPablo de Lara 1742f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 1743f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1744f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1745f2c5125aSPablo de Lara 1746f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1747f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1748f2c5125aSPablo de Lara 1749f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1750f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1751f2c5125aSPablo de Lara 1752f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1753f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 1754f2c5125aSPablo de Lara 1755f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1756f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 1757f2c5125aSPablo de Lara 1758f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1759f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1760f2c5125aSPablo de Lara 1761f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 1762f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1763f2c5125aSPablo de Lara 1764f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1765f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1766f2c5125aSPablo de Lara 1767f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1768f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1769f2c5125aSPablo de Lara 1770f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1771f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 1772f2c5125aSPablo de Lara 1773f2c5125aSPablo de Lara if (txq_flags != RTE_PMD_PARAM_UNSET) 1774f2c5125aSPablo de Lara port->tx_conf.txq_flags = txq_flags; 1775f2c5125aSPablo de Lara } 1776f2c5125aSPablo de Lara 1777013af9b6SIntel void 1778013af9b6SIntel init_port_config(void) 1779013af9b6SIntel { 1780013af9b6SIntel portid_t pid; 1781013af9b6SIntel struct rte_port *port; 1782013af9b6SIntel 1783edab33b1STetsuya Mukawa FOREACH_PORT(pid, ports) { 1784013af9b6SIntel port = &ports[pid]; 1785013af9b6SIntel port->dev_conf.rxmode = rx_mode; 1786013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 17873ce690d3SBruce Richardson if (nb_rxq > 1) { 1788013af9b6SIntel 
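/* More than one RX queue: request RSS with the driver's default key and the configured rss_hf hash functions. */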
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1789013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1790af75078fSIntel } else { 1791013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1792013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1793af75078fSIntel } 17943ce690d3SBruce Richardson 17953ce690d3SBruce Richardson if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) { 17963ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 17973ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 17983ce690d3SBruce Richardson else 17993ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 18003ce690d3SBruce Richardson } 18013ce690d3SBruce Richardson 1802a30979f6SOuyang Changchun if (port->dev_info.max_vfs != 0) { 1803a30979f6SOuyang Changchun if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 1804a30979f6SOuyang Changchun port->dev_conf.rxmode.mq_mode = 1805a30979f6SOuyang Changchun ETH_MQ_RX_VMDQ_RSS; 1806a30979f6SOuyang Changchun else 1807a30979f6SOuyang Changchun port->dev_conf.rxmode.mq_mode = 1808a30979f6SOuyang Changchun ETH_MQ_RX_NONE; 1809a30979f6SOuyang Changchun 1810a30979f6SOuyang Changchun port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE; 1811a30979f6SOuyang Changchun } 1812a30979f6SOuyang Changchun 1813f2c5125aSPablo de Lara rxtx_port_config(port); 1814013af9b6SIntel 1815013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 1816013af9b6SIntel 1817013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 18187b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 18197b7e5ba7SIntel rte_eth_dev_bypass_init(pid); 18207b7e5ba7SIntel #endif 1821013af9b6SIntel } 1822013af9b6SIntel } 1823013af9b6SIntel 182441b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 182541b05095SBernard Iremonger { 182641b05095SBernard Iremonger struct rte_port *port; 182741b05095SBernard Iremonger 182841b05095SBernard Iremonger port = &ports[slave_pid]; 182941b05095SBernard Iremonger port->slave_flag = 1; 183041b05095SBernard Iremonger } 183141b05095SBernard Iremonger 183241b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 183341b05095SBernard Iremonger { 183441b05095SBernard Iremonger struct rte_port *port; 183541b05095SBernard Iremonger 183641b05095SBernard Iremonger port = &ports[slave_pid]; 183741b05095SBernard Iremonger port->slave_flag = 0; 183841b05095SBernard Iremonger } 183941b05095SBernard Iremonger 18400e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 18410e545d30SBernard Iremonger { 18420e545d30SBernard Iremonger struct rte_port *port; 18430e545d30SBernard Iremonger 18440e545d30SBernard Iremonger port = &ports[slave_pid]; 18450e545d30SBernard Iremonger return port->slave_flag; 18460e545d30SBernard Iremonger } 18470e545d30SBernard Iremonger 1848013af9b6SIntel const uint16_t vlan_tags[] = { 1849013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1850013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1851013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1852013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 1853013af9b6SIntel }; 1854013af9b6SIntel 1855013af9b6SIntel static int 18561a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 18571a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 18581a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 18591a572499SJingjing Wu uint8_t pfc_en) 1860013af9b6SIntel { 1861013af9b6SIntel uint8_t i; 1862af75078fSIntel 1863af75078fSIntel /* 1864013af9b6SIntel * Builds up the correct configuration for dcb+vt based 
on the vlan tags array 1865013af9b6SIntel * given above, and the number of traffic classes available for use. 1866af75078fSIntel */ 18671a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 18681a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 18691a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 18701a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 18711a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1872013af9b6SIntel 1873013af9b6SIntel /* VMDQ+DCB RX and TX configrations */ 18741a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 18751a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 18761a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 18771a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 18781a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 18791a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 1880013af9b6SIntel 18811a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 18821a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 18831a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 18841a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 18851a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 1886af75078fSIntel } 1887013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 18881a572499SJingjing Wu vmdq_rx_conf->dcb_tc[i] = i; 18891a572499SJingjing Wu vmdq_tx_conf->dcb_tc[i] = i; 1890013af9b6SIntel } 1891013af9b6SIntel 1892013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 189332e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 189432e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 18951a572499SJingjing Wu } else { 18961a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 18971a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 18981a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 18991a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 1900013af9b6SIntel 19011a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 19021a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 19031a572499SJingjing Wu 19041a572499SJingjing Wu for (i = 0; i < num_tcs; i++) { 19051a572499SJingjing Wu rx_conf->dcb_tc[i] = i; 19061a572499SJingjing Wu tx_conf->dcb_tc[i] = i; 1907013af9b6SIntel } 19081a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 19091a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 191032e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 19111a572499SJingjing Wu } 19121a572499SJingjing Wu 19131a572499SJingjing Wu if (pfc_en) 19141a572499SJingjing Wu eth_conf->dcb_capability_en = 19151a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 1916013af9b6SIntel else 1917013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 1918013af9b6SIntel 1919013af9b6SIntel return 0; 1920013af9b6SIntel } 1921013af9b6SIntel 1922013af9b6SIntel int 19231a572499SJingjing Wu init_port_dcb_config(portid_t pid, 19241a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 19251a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 19261a572499SJingjing Wu uint8_t pfc_en) 1927013af9b6SIntel { 1928013af9b6SIntel struct rte_eth_conf port_conf; 19291a572499SJingjing Wu struct rte_eth_dev_info dev_info; 1930013af9b6SIntel struct rte_port *rte_port; 1931013af9b6SIntel int retval; 1932013af9b6SIntel uint16_t i; 1933013af9b6SIntel 19341a572499SJingjing Wu rte_eth_dev_info_get(pid, &dev_info); 19351a572499SJingjing Wu 19361a572499SJingjing Wu /* If 
dev_info.vmdq_pool_base is greater than 0, 19371a572499SJingjing Wu * the queue id of vmdq pools is started after pf queues. 19381a572499SJingjing Wu */ 19391a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) { 19401a572499SJingjing Wu printf("VMDQ_DCB multi-queue mode is nonsensical" 19411a572499SJingjing Wu " for port %d.", pid); 19421a572499SJingjing Wu return -1; 19431a572499SJingjing Wu } 19441a572499SJingjing Wu 19451a572499SJingjing Wu /* Assume the ports in testpmd have the same dcb capability 19461a572499SJingjing Wu * and has the same number of rxq and txq in dcb mode 19471a572499SJingjing Wu */ 19481a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 19491a572499SJingjing Wu nb_rxq = dev_info.max_rx_queues; 19501a572499SJingjing Wu nb_txq = dev_info.max_tx_queues; 19511a572499SJingjing Wu } else { 19521a572499SJingjing Wu /*if vt is disabled, use all pf queues */ 19531a572499SJingjing Wu if (dev_info.vmdq_pool_base == 0) { 19541a572499SJingjing Wu nb_rxq = dev_info.max_rx_queues; 19551a572499SJingjing Wu nb_txq = dev_info.max_tx_queues; 19561a572499SJingjing Wu } else { 19571a572499SJingjing Wu nb_rxq = (queueid_t)num_tcs; 19581a572499SJingjing Wu nb_txq = (queueid_t)num_tcs; 19591a572499SJingjing Wu 19601a572499SJingjing Wu } 19611a572499SJingjing Wu } 1962013af9b6SIntel rx_free_thresh = 64; 1963013af9b6SIntel 1964013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 1965013af9b6SIntel /* Enter DCB configuration status */ 1966013af9b6SIntel dcb_config = 1; 1967013af9b6SIntel 1968013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 19691a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 1970013af9b6SIntel if (retval < 0) 1971013af9b6SIntel return retval; 1972013af9b6SIntel 1973013af9b6SIntel rte_port = &ports[pid]; 1974013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 1975013af9b6SIntel 1976f2c5125aSPablo de Lara rxtx_port_config(rte_port); 1977013af9b6SIntel /* VLAN filter */ 1978013af9b6SIntel rte_port->dev_conf.rxmode.hw_vlan_filter = 1; 19791a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 1980013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 1981013af9b6SIntel 1982013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 1983013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 1984013af9b6SIntel 19857741e4cfSIntel rte_port->dcb_flag = 1; 19867741e4cfSIntel 1987013af9b6SIntel return 0; 1988af75078fSIntel } 1989af75078fSIntel 1990ffc468ffSTetsuya Mukawa static void 1991ffc468ffSTetsuya Mukawa init_port(void) 1992ffc468ffSTetsuya Mukawa { 1993ffc468ffSTetsuya Mukawa portid_t pid; 1994ffc468ffSTetsuya Mukawa 1995ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
*/ 1996ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 1997ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 1998ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 1999ffc468ffSTetsuya Mukawa if (ports == NULL) { 2000ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2001ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2002ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2003ffc468ffSTetsuya Mukawa } 2004ffc468ffSTetsuya Mukawa 2005ffc468ffSTetsuya Mukawa /* enabled allocated ports */ 2006ffc468ffSTetsuya Mukawa for (pid = 0; pid < nb_ports; pid++) 2007ffc468ffSTetsuya Mukawa ports[pid].enabled = 1; 2008ffc468ffSTetsuya Mukawa } 2009ffc468ffSTetsuya Mukawa 2010d3a274ceSZhihong Wang static void 2011d3a274ceSZhihong Wang force_quit(void) 2012d3a274ceSZhihong Wang { 2013d3a274ceSZhihong Wang pmd_test_exit(); 2014d3a274ceSZhihong Wang prompt_exit(); 2015d3a274ceSZhihong Wang } 2016d3a274ceSZhihong Wang 2017d3a274ceSZhihong Wang static void 2018d3a274ceSZhihong Wang signal_handler(int signum) 2019d3a274ceSZhihong Wang { 2020d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 2021d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 2022d3a274ceSZhihong Wang signum); 2023d3a274ceSZhihong Wang force_quit(); 2024d3a274ceSZhihong Wang /* exit with the expected status */ 2025d3a274ceSZhihong Wang signal(signum, SIG_DFL); 2026d3a274ceSZhihong Wang kill(getpid(), signum); 2027d3a274ceSZhihong Wang } 2028d3a274ceSZhihong Wang } 2029d3a274ceSZhihong Wang 2030af75078fSIntel int 2031af75078fSIntel main(int argc, char** argv) 2032af75078fSIntel { 2033af75078fSIntel int diag; 2034013af9b6SIntel uint8_t port_id; 2035af75078fSIntel 2036d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 2037d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 2038d3a274ceSZhihong Wang 2039af75078fSIntel diag = rte_eal_init(argc, argv); 2040af75078fSIntel if (diag < 0) 2041af75078fSIntel rte_panic("Cannot init EAL\n"); 2042af75078fSIntel 2043af75078fSIntel nb_ports = (portid_t) rte_eth_dev_count(); 2044af75078fSIntel if (nb_ports == 0) 2045edab33b1STetsuya Mukawa RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); 2046af75078fSIntel 2047ffc468ffSTetsuya Mukawa /* allocate port structures, and init them */ 2048ffc468ffSTetsuya Mukawa init_port(); 2049ffc468ffSTetsuya Mukawa 2050af75078fSIntel set_def_fwd_config(); 2051af75078fSIntel if (nb_lcores == 0) 2052af75078fSIntel rte_panic("Empty set of forwarding logical cores - check the " 2053af75078fSIntel "core mask supplied in the command parameters\n"); 2054af75078fSIntel 2055af75078fSIntel argc -= diag; 2056af75078fSIntel argv += diag; 2057af75078fSIntel if (argc > 1) 2058af75078fSIntel launch_args_parse(argc, argv); 2059af75078fSIntel 20605a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 20615a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 20625a8fb55cSReshma Pattan 20635a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 2064af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 2065af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 2066af75078fSIntel nb_rxq, nb_txq); 2067af75078fSIntel 2068af75078fSIntel init_config(); 2069148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 2070148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2071af75078fSIntel 2072ce8d5614SIntel /* set all ports to promiscuous mode by default */ 2073edab33b1STetsuya Mukawa FOREACH_PORT(port_id, ports) 2074ce8d5614SIntel 
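/* In promiscuous mode received frames are accepted regardless of their destination MAC address. */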
rte_eth_promiscuous_enable(port_id); 2075af75078fSIntel 20760d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 2077ca7feb22SCyril Chemparathy if (interactive == 1) { 2078ca7feb22SCyril Chemparathy if (auto_start) { 2079ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 2080ca7feb22SCyril Chemparathy start_packet_forwarding(0); 2081ca7feb22SCyril Chemparathy } 2082af75078fSIntel prompt(); 2083ca7feb22SCyril Chemparathy } else 20840d56cb81SThomas Monjalon #endif 20850d56cb81SThomas Monjalon { 2086af75078fSIntel char c; 2087af75078fSIntel int rc; 2088af75078fSIntel 2089af75078fSIntel printf("No interactive command line, starting packet forwarding\n"); 2090af75078fSIntel start_packet_forwarding(0); 2091af75078fSIntel printf("Press enter to exit\n"); 2092af75078fSIntel rc = read(0, &c, 1); 2093d3a274ceSZhihong Wang pmd_test_exit(); 2094af75078fSIntel if (rc < 0) 2095af75078fSIntel return 1; 2096af75078fSIntel } 2097af75078fSIntel 2098af75078fSIntel return 0; 2099af75078fSIntel } 2100
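/*
 * Illustrative sketch, not part of testpmd: the polling pattern used by
 * check_all_ports_link_status() can be reused to wait for a single port.
 * The function name and the max_ms parameter are invented for this example;
 * it only relies on APIs already used above (rte_eth_link_get_nowait() and
 * rte_delay_ms()) and on the CHECK_INTERVAL definition earlier in this file.
 */
int wait_for_link_up(portid_t port_id, unsigned int max_ms); /* local prototype for strict builds */

int
wait_for_link_up(portid_t port_id, unsigned int max_ms)
{
	struct rte_eth_link link;
	unsigned int waited;

	for (waited = 0; waited < max_ms; waited += CHECK_INTERVAL) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait((uint8_t)port_id, &link);
		if (link.link_status != ETH_LINK_DOWN)
			return 0; /* link reported up */
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1; /* link still down after roughly max_ms milliseconds */
}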