1af75078fSIntel /*- 2af75078fSIntel * BSD LICENSE 3af75078fSIntel * 47e4441c8SRemy Horton * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. 5af75078fSIntel * All rights reserved. 6af75078fSIntel * 7af75078fSIntel * Redistribution and use in source and binary forms, with or without 8af75078fSIntel * modification, are permitted provided that the following conditions 9af75078fSIntel * are met: 10af75078fSIntel * 11af75078fSIntel * * Redistributions of source code must retain the above copyright 12af75078fSIntel * notice, this list of conditions and the following disclaimer. 13af75078fSIntel * * Redistributions in binary form must reproduce the above copyright 14af75078fSIntel * notice, this list of conditions and the following disclaimer in 15af75078fSIntel * the documentation and/or other materials provided with the 16af75078fSIntel * distribution. 17af75078fSIntel * * Neither the name of Intel Corporation nor the names of its 18af75078fSIntel * contributors may be used to endorse or promote products derived 19af75078fSIntel * from this software without specific prior written permission. 20af75078fSIntel * 21af75078fSIntel * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22af75078fSIntel * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23af75078fSIntel * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24af75078fSIntel * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 25af75078fSIntel * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26af75078fSIntel * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27af75078fSIntel * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28af75078fSIntel * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29af75078fSIntel * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30af75078fSIntel * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31af75078fSIntel * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32af75078fSIntel */ 33af75078fSIntel 34af75078fSIntel #include <stdarg.h> 35af75078fSIntel #include <stdio.h> 36af75078fSIntel #include <stdlib.h> 37af75078fSIntel #include <signal.h> 38af75078fSIntel #include <string.h> 39af75078fSIntel #include <time.h> 40af75078fSIntel #include <fcntl.h> 41af75078fSIntel #include <sys/types.h> 42af75078fSIntel #include <errno.h> 43af75078fSIntel 44af75078fSIntel #include <sys/queue.h> 45af75078fSIntel #include <sys/stat.h> 46af75078fSIntel 47af75078fSIntel #include <stdint.h> 48af75078fSIntel #include <unistd.h> 49af75078fSIntel #include <inttypes.h> 50af75078fSIntel 51af75078fSIntel #include <rte_common.h> 52d1eb542eSOlivier Matz #include <rte_errno.h> 53af75078fSIntel #include <rte_byteorder.h> 54af75078fSIntel #include <rte_log.h> 55af75078fSIntel #include <rte_debug.h> 56af75078fSIntel #include <rte_cycles.h> 57af75078fSIntel #include <rte_memory.h> 58af75078fSIntel #include <rte_memcpy.h> 59af75078fSIntel #include <rte_memzone.h> 60af75078fSIntel #include <rte_launch.h> 61af75078fSIntel #include <rte_eal.h> 62af75078fSIntel #include <rte_per_lcore.h> 63af75078fSIntel #include <rte_lcore.h> 64af75078fSIntel #include <rte_atomic.h> 65af75078fSIntel #include <rte_branch_prediction.h> 66af75078fSIntel #include <rte_mempool.h> 67af75078fSIntel #include <rte_malloc.h> 68af75078fSIntel #include 
<rte_mbuf.h> 69af75078fSIntel #include <rte_interrupts.h> 70af75078fSIntel #include <rte_pci.h> 71af75078fSIntel #include <rte_ether.h> 72af75078fSIntel #include <rte_ethdev.h> 73edab33b1STetsuya Mukawa #include <rte_dev.h> 74af75078fSIntel #include <rte_string_fns.h> 75148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT 76148f963fSBruce Richardson #include <rte_eth_xenvirt.h> 77148f963fSBruce Richardson #endif 78102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 79102b7329SReshma Pattan #include <rte_pdump.h> 80102b7329SReshma Pattan #endif 81938a184aSAdrien Mazarguil #include <rte_flow.h> 827e4441c8SRemy Horton #include <rte_metrics.h> 837e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 847e4441c8SRemy Horton #include <rte_bitrate.h> 857e4441c8SRemy Horton #endif 8662d3216dSReshma Pattan #include <rte_metrics.h> 8762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 8862d3216dSReshma Pattan #include <rte_latencystats.h> 8962d3216dSReshma Pattan #endif 90af75078fSIntel 91af75078fSIntel #include "testpmd.h" 92af75078fSIntel 93af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 94af75078fSIntel 95af75078fSIntel /* use master core for command line ? */ 96af75078fSIntel uint8_t interactive = 0; 97ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 98af75078fSIntel 99af75078fSIntel /* 100af75078fSIntel * NUMA support configuration. 101af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 102af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 103af75078fSIntel * probed ports among the CPU sockets 0 and 1. 104af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 105af75078fSIntel */ 106af75078fSIntel uint8_t numa_support = 0; /**< No numa support by default */ 107af75078fSIntel 108af75078fSIntel /* 109b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 110b6ea6408SIntel * not configured. 
111b6ea6408SIntel */ 112b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 113b6ea6408SIntel 114b6ea6408SIntel /* 115148f963fSBruce Richardson * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 116148f963fSBruce Richardson */ 117148f963fSBruce Richardson uint8_t mp_anon = 0; 118148f963fSBruce Richardson 119148f963fSBruce Richardson /* 120af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 121af75078fSIntel * forwarded. 122547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator 123af75078fSIntel * ports. 124af75078fSIntel */ 125af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 126af75078fSIntel portid_t nb_peer_eth_addrs = 0; 127af75078fSIntel 128af75078fSIntel /* 129af75078fSIntel * Probed Target Environment. 130af75078fSIntel */ 131af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 132af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 133af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 134af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 135af75078fSIntel 136af75078fSIntel /* 137af75078fSIntel * Test Forwarding Configuration. 138af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 139af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 140af75078fSIntel */ 141af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 142af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 143af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 144af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 145af75078fSIntel 146af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 147af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. 
*/ 148af75078fSIntel 149af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 150af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 151af75078fSIntel 152af75078fSIntel /* 153af75078fSIntel * Forwarding engines. 154af75078fSIntel */ 155af75078fSIntel struct fwd_engine * fwd_engines[] = { 156af75078fSIntel &io_fwd_engine, 157af75078fSIntel &mac_fwd_engine, 158d47388f1SCyril Chemparathy &mac_swap_engine, 159e9e23a61SCyril Chemparathy &flow_gen_engine, 160af75078fSIntel &rx_only_engine, 161af75078fSIntel &tx_only_engine, 162af75078fSIntel &csum_fwd_engine, 163168dfa61SIvan Boule &icmp_echo_engine, 164af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 165af75078fSIntel &ieee1588_fwd_engine, 166af75078fSIntel #endif 167af75078fSIntel NULL, 168af75078fSIntel }; 169af75078fSIntel 170af75078fSIntel struct fwd_config cur_fwd_config; 171af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 172bf56fce1SZhihong Wang uint32_t retry_enabled; 173bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 174bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 175af75078fSIntel 176af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 177c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 178c8798818SIntel * specified on command-line. */ 179af75078fSIntel 180af75078fSIntel /* 181af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 182af75078fSIntel */ 183af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. 
*/ 184af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 185af75078fSIntel TXONLY_DEF_PACKET_LEN, 186af75078fSIntel }; 187af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 188af75078fSIntel 18979bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 19079bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 19179bec05bSKonstantin Ananyev 192af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 193e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 194af75078fSIntel 195900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 196900550deSIntel uint8_t dcb_config = 0; 197900550deSIntel 198900550deSIntel /* Whether the dcb is in testing status */ 199900550deSIntel uint8_t dcb_test = 0; 200900550deSIntel 201af75078fSIntel /* 202af75078fSIntel * Configurable number of RX/TX queues. 203af75078fSIntel */ 204af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 205af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 206af75078fSIntel 207af75078fSIntel /* 208af75078fSIntel * Configurable number of RX/TX ring descriptors. 209af75078fSIntel */ 210af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128 211af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512 212af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 213af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 214af75078fSIntel 215f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1 216af75078fSIntel /* 217af75078fSIntel * Configurable values of RX and TX ring threshold registers. 
218af75078fSIntel */ 219af75078fSIntel 220f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 221f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 222f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 223af75078fSIntel 224f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 225f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 226f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 227af75078fSIntel 228af75078fSIntel /* 229af75078fSIntel * Configurable value of RX free threshold. 230af75078fSIntel */ 231f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 232af75078fSIntel 233af75078fSIntel /* 234ce8d5614SIntel * Configurable value of RX drop enable. 235ce8d5614SIntel */ 236f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 237ce8d5614SIntel 238ce8d5614SIntel /* 239af75078fSIntel * Configurable value of TX free threshold. 240af75078fSIntel */ 241f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 242af75078fSIntel 243af75078fSIntel /* 244af75078fSIntel * Configurable value of TX RS bit threshold. 245af75078fSIntel */ 246f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 247af75078fSIntel 248af75078fSIntel /* 249ce8d5614SIntel * Configurable value of TX queue flags. 250ce8d5614SIntel */ 251f2c5125aSPablo de Lara int32_t txq_flags = RTE_PMD_PARAM_UNSET; 252ce8d5614SIntel 253ce8d5614SIntel /* 254af75078fSIntel * Receive Side Scaling (RSS) configuration. 255af75078fSIntel */ 2568a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 257af75078fSIntel 258af75078fSIntel /* 259af75078fSIntel * Port topology configuration 260af75078fSIntel */ 261af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 262af75078fSIntel 2637741e4cfSIntel /* 2647741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 
2657741e4cfSIntel  */ 2667741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 2677741e4cfSIntel 268af75078fSIntel /* 269bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port. 270bc202406SDavid Marchand  */ 271bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 272bc202406SDavid Marchand 273bc202406SDavid Marchand /* 2747b7e5ba7SIntel  * NIC bypass mode configuration options. 2757b7e5ba7SIntel  */ 2767b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 2777b7e5ba7SIntel 2787b7e5ba7SIntel /* The NIC bypass watchdog timeout. */ 2797b7e5ba7SIntel uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 2807b7e5ba7SIntel 2817b7e5ba7SIntel #endif 2827b7e5ba7SIntel 28362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 28462d3216dSReshma Pattan 28562d3216dSReshma Pattan /* 28662d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline 28762d3216dSReshma Pattan  */ 28862d3216dSReshma Pattan uint8_t latencystats_enabled; 28962d3216dSReshma Pattan 29062d3216dSReshma Pattan /* 29162d3216dSReshma Pattan  * Lcore ID to serve latency statistics. 29262d3216dSReshma Pattan  */ 29362d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1; 29462d3216dSReshma Pattan 29562d3216dSReshma Pattan #endif 29662d3216dSReshma Pattan 2977b7e5ba7SIntel /* 298af75078fSIntel  * Ethernet device configuration. 299af75078fSIntel  */ 300af75078fSIntel struct rte_eth_rxmode rx_mode = { 301af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ 302af75078fSIntel 	.split_hdr_size = 0, 303af75078fSIntel 	.header_split   = 0, /**< Header Split disabled. */ 304af75078fSIntel 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */ 305af75078fSIntel 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */ 306a47aa8b9SIntel 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */ 307a47aa8b9SIntel 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */ 308af75078fSIntel 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled.
*/ 30979dd163fSJeff Guo .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */ 310af75078fSIntel }; 311af75078fSIntel 312af75078fSIntel struct rte_fdir_conf fdir_conf = { 313af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 314af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 315af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 316d9d5e6f2SJingjing Wu .mask = { 317d9d5e6f2SJingjing Wu .vlan_tci_mask = 0x0, 318d9d5e6f2SJingjing Wu .ipv4_mask = { 319d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF, 320d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF, 321d9d5e6f2SJingjing Wu }, 322d9d5e6f2SJingjing Wu .ipv6_mask = { 323d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 324d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 325d9d5e6f2SJingjing Wu }, 326d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF, 327d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF, 32847b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF, 32947b3ac6bSWenzhuo Lu .tunnel_type_mask = 1, 33047b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF, 331d9d5e6f2SJingjing Wu }, 332af75078fSIntel .drop_queue = 127, 333af75078fSIntel }; 334af75078fSIntel 3352950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ 336af75078fSIntel 337ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 338ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 339ed30d9b6SIntel 340ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 341ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 342ed30d9b6SIntel 343ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 344ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 345ed30d9b6SIntel 3467acf894dSStephen Hurd unsigned max_socket = 0; 3477acf894dSStephen Hurd 3487e4441c8SRemy Horton /* Bitrate statistics */ 3497e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 3507e4441c8SRemy Horton 351ed30d9b6SIntel /* Forward function declarations */ 352ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); 353edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 354*76ad4a2dSGaetan Rivet static void eth_event_callback(uint8_t port_id, 355*76ad4a2dSGaetan Rivet enum rte_eth_event_type type, 356*76ad4a2dSGaetan Rivet void *param); 357ce8d5614SIntel 358ce8d5614SIntel /* 359ce8d5614SIntel * Check if all the ports are started. 360ce8d5614SIntel * If yes, return positive value. If not, return zero. 361ce8d5614SIntel */ 362ce8d5614SIntel static int all_ports_started(void); 363ed30d9b6SIntel 364af75078fSIntel /* 365af75078fSIntel * Setup default configuration. 
366af75078fSIntel */ 367af75078fSIntel static void 368af75078fSIntel set_default_fwd_lcores_config(void) 369af75078fSIntel { 370af75078fSIntel unsigned int i; 371af75078fSIntel unsigned int nb_lc; 3727acf894dSStephen Hurd unsigned int sock_num; 373af75078fSIntel 374af75078fSIntel nb_lc = 0; 375af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 3767acf894dSStephen Hurd sock_num = rte_lcore_to_socket_id(i) + 1; 3777acf894dSStephen Hurd if (sock_num > max_socket) { 3787acf894dSStephen Hurd if (sock_num > RTE_MAX_NUMA_NODES) 3797acf894dSStephen Hurd rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES); 3807acf894dSStephen Hurd max_socket = sock_num; 3817acf894dSStephen Hurd } 382f54fe5eeSStephen Hurd if (!rte_lcore_is_enabled(i)) 383f54fe5eeSStephen Hurd continue; 384f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 385f54fe5eeSStephen Hurd continue; 386f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 387af75078fSIntel } 388af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 389af75078fSIntel nb_cfg_lcores = nb_lcores; 390af75078fSIntel nb_fwd_lcores = 1; 391af75078fSIntel } 392af75078fSIntel 393af75078fSIntel static void 394af75078fSIntel set_def_peer_eth_addrs(void) 395af75078fSIntel { 396af75078fSIntel portid_t i; 397af75078fSIntel 398af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 399af75078fSIntel peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 400af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 401af75078fSIntel } 402af75078fSIntel } 403af75078fSIntel 404af75078fSIntel static void 405af75078fSIntel set_default_fwd_ports_config(void) 406af75078fSIntel { 407af75078fSIntel portid_t pt_id; 408af75078fSIntel 409af75078fSIntel for (pt_id = 0; pt_id < nb_ports; pt_id++) 410af75078fSIntel fwd_ports_ids[pt_id] = pt_id; 411af75078fSIntel 412af75078fSIntel nb_cfg_ports = nb_ports; 413af75078fSIntel nb_fwd_ports = nb_ports; 414af75078fSIntel } 415af75078fSIntel 416af75078fSIntel void 417af75078fSIntel 
set_def_fwd_config(void) 418af75078fSIntel { 419af75078fSIntel set_default_fwd_lcores_config(); 420af75078fSIntel set_def_peer_eth_addrs(); 421af75078fSIntel set_default_fwd_ports_config(); 422af75078fSIntel } 423af75078fSIntel 424af75078fSIntel /* 425af75078fSIntel * Configuration initialisation done once at init time. 426af75078fSIntel */ 427af75078fSIntel static void 428af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 429af75078fSIntel unsigned int socket_id) 430af75078fSIntel { 431af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 432bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 433af75078fSIntel uint32_t mb_size; 434af75078fSIntel 435dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 436af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 437148f963fSBruce Richardson 438d1eb542eSOlivier Matz RTE_LOG(INFO, USER1, 439d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 440d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 441d1eb542eSOlivier Matz 442148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT 443148f963fSBruce Richardson rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size, 444af75078fSIntel (unsigned) mb_mempool_cache, 445af75078fSIntel sizeof(struct rte_pktmbuf_pool_private), 446dfb03bbeSOlivier Matz rte_pktmbuf_pool_init, NULL, 447dfb03bbeSOlivier Matz rte_pktmbuf_init, NULL, 448af75078fSIntel socket_id, 0); 449bece7b6cSChristian Ehrhardt #endif 450148f963fSBruce Richardson 451bece7b6cSChristian Ehrhardt /* if the former XEN allocation failed fall back to normal allocation */ 452bece7b6cSChristian Ehrhardt if (rte_mp == NULL) { 453b19a0c75SOlivier Matz if (mp_anon != 0) { 454b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 455bece7b6cSChristian Ehrhardt mb_size, (unsigned) mb_mempool_cache, 456148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 
457148f963fSBruce Richardson socket_id, 0); 45824427bb9SOlivier Matz if (rte_mp == NULL) 45924427bb9SOlivier Matz goto err; 460b19a0c75SOlivier Matz 461b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 462b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 463b19a0c75SOlivier Matz rte_mp = NULL; 46424427bb9SOlivier Matz goto err; 465b19a0c75SOlivier Matz } 466b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 467b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 468b19a0c75SOlivier Matz } else { 469ea0c20eaSOlivier Matz /* wrapper to rte_mempool_create() */ 470ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 471ea0c20eaSOlivier Matz mb_mempool_cache, 0, mbuf_seg_size, socket_id); 472bece7b6cSChristian Ehrhardt } 473b19a0c75SOlivier Matz } 474148f963fSBruce Richardson 47524427bb9SOlivier Matz err: 476af75078fSIntel if (rte_mp == NULL) { 477d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 478d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 479d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 480148f963fSBruce Richardson } else if (verbose_level > 0) { 481591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 482af75078fSIntel } 483af75078fSIntel } 484af75078fSIntel 48520a0286fSLiu Xiaofeng /* 48620a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 48720a0286fSLiu Xiaofeng * if valid, return 0, else return -1 48820a0286fSLiu Xiaofeng */ 48920a0286fSLiu Xiaofeng static int 49020a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 49120a0286fSLiu Xiaofeng { 49220a0286fSLiu Xiaofeng static int warning_once = 0; 49320a0286fSLiu Xiaofeng 4947acf894dSStephen Hurd if (socket_id >= max_socket) { 49520a0286fSLiu Xiaofeng if (!warning_once && numa_support) 49620a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 49720a0286fSLiu Xiaofeng " using --port-numa-config and" 49820a0286fSLiu Xiaofeng " 
--ring-numa-config parameters along with" 49920a0286fSLiu Xiaofeng " --numa.\n"); 50020a0286fSLiu Xiaofeng warning_once = 1; 50120a0286fSLiu Xiaofeng return -1; 50220a0286fSLiu Xiaofeng } 50320a0286fSLiu Xiaofeng return 0; 50420a0286fSLiu Xiaofeng } 50520a0286fSLiu Xiaofeng 506af75078fSIntel static void 507af75078fSIntel init_config(void) 508af75078fSIntel { 509ce8d5614SIntel portid_t pid; 510af75078fSIntel struct rte_port *port; 511af75078fSIntel struct rte_mempool *mbp; 512af75078fSIntel unsigned int nb_mbuf_per_pool; 513af75078fSIntel lcoreid_t lc_id; 5147acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 515af75078fSIntel 5167acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 517af75078fSIntel /* Configuration of logical cores. */ 518af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 519af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 520fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 521af75078fSIntel if (fwd_lcores == NULL) { 522ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 523ce8d5614SIntel "failed\n", nb_lcores); 524af75078fSIntel } 525af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 526af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 527af75078fSIntel sizeof(struct fwd_lcore), 528fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 529af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 530ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 531ce8d5614SIntel "failed\n"); 532af75078fSIntel } 533af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 534af75078fSIntel } 535af75078fSIntel 536af75078fSIntel /* 537af75078fSIntel * Create pools of mbuf. 538af75078fSIntel * If NUMA support is disabled, create a single pool of mbuf in 539b6ea6408SIntel * socket 0 memory by default. 540af75078fSIntel * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 
541c8798818SIntel * 542c8798818SIntel * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 543c8798818SIntel * nb_txd can be configured at run time. 544af75078fSIntel */ 545c8798818SIntel if (param_total_num_mbufs) 546c8798818SIntel nb_mbuf_per_pool = param_total_num_mbufs; 547c8798818SIntel else { 548c8798818SIntel nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) 549c8798818SIntel + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 550b6ea6408SIntel 551b6ea6408SIntel if (!numa_support) 552edab33b1STetsuya Mukawa nb_mbuf_per_pool = 553edab33b1STetsuya Mukawa (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 554c8798818SIntel } 555af75078fSIntel 556b6ea6408SIntel if (!numa_support) { 557b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 558b6ea6408SIntel mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 559b6ea6408SIntel else 560b6ea6408SIntel mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 561b6ea6408SIntel socket_num); 562b6ea6408SIntel } 563af75078fSIntel 5647d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 565ce8d5614SIntel port = &ports[pid]; 566ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 567ce8d5614SIntel 568b6ea6408SIntel if (numa_support) { 569b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 570b6ea6408SIntel port_per_socket[port_numa[pid]]++; 571b6ea6408SIntel else { 572b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 57320a0286fSLiu Xiaofeng 57420a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 57520a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 57620a0286fSLiu Xiaofeng socket_id = 0; 577b6ea6408SIntel port_per_socket[socket_id]++; 578b6ea6408SIntel } 579b6ea6408SIntel } 580b6ea6408SIntel 581ce8d5614SIntel /* set flag to initialize port/queue */ 582ce8d5614SIntel port->need_reconfig = 1; 583ce8d5614SIntel port->need_reconfig_queues = 1; 584ce8d5614SIntel } 585ce8d5614SIntel 586b6ea6408SIntel if (numa_support) { 587b6ea6408SIntel uint8_t i; 588b6ea6408SIntel unsigned int nb_mbuf; 
589ce8d5614SIntel 590b6ea6408SIntel if (param_total_num_mbufs) 591b6ea6408SIntel nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports; 592b6ea6408SIntel 5937acf894dSStephen Hurd for (i = 0; i < max_socket; i++) { 594edab33b1STetsuya Mukawa nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS); 595b6ea6408SIntel if (nb_mbuf) 596b6ea6408SIntel mbuf_pool_create(mbuf_data_size, 597b6ea6408SIntel nb_mbuf,i); 598b6ea6408SIntel } 599b6ea6408SIntel } 600b6ea6408SIntel init_port_config(); 6015886ae07SAdrien Mazarguil 6025886ae07SAdrien Mazarguil /* 6035886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 6045886ae07SAdrien Mazarguil */ 6055886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 6068fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 6078fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 6088fd8bebcSAdrien Mazarguil 6095886ae07SAdrien Mazarguil if (mbp == NULL) 6105886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 6115886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 6125886ae07SAdrien Mazarguil } 6135886ae07SAdrien Mazarguil 614ce8d5614SIntel /* Configuration of packet forwarding streams. */ 615ce8d5614SIntel if (init_fwd_streams() < 0) 616ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 6170c0db76fSBernard Iremonger 6180c0db76fSBernard Iremonger fwd_config_setup(); 619ce8d5614SIntel } 620ce8d5614SIntel 6212950a769SDeclan Doherty 6222950a769SDeclan Doherty void 623a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 6242950a769SDeclan Doherty { 6252950a769SDeclan Doherty struct rte_port *port; 6262950a769SDeclan Doherty 6272950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
*/ 6282950a769SDeclan Doherty port = &ports[new_port_id]; 6292950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 6302950a769SDeclan Doherty 6312950a769SDeclan Doherty /* set flag to initialize port/queue */ 6322950a769SDeclan Doherty port->need_reconfig = 1; 6332950a769SDeclan Doherty port->need_reconfig_queues = 1; 634a21d5a4bSDeclan Doherty port->socket_id = socket_id; 6352950a769SDeclan Doherty 6362950a769SDeclan Doherty init_port_config(); 6372950a769SDeclan Doherty } 6382950a769SDeclan Doherty 6392950a769SDeclan Doherty 640ce8d5614SIntel int 641ce8d5614SIntel init_fwd_streams(void) 642ce8d5614SIntel { 643ce8d5614SIntel portid_t pid; 644ce8d5614SIntel struct rte_port *port; 645ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 6465a8fb55cSReshma Pattan queueid_t q; 647ce8d5614SIntel 648ce8d5614SIntel /* set socket id according to numa or not */ 6497d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 650ce8d5614SIntel port = &ports[pid]; 651ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 652ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 653ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 654ce8d5614SIntel port->dev_info.max_rx_queues); 655ce8d5614SIntel return -1; 656ce8d5614SIntel } 657ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 658ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 659ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 660ce8d5614SIntel port->dev_info.max_tx_queues); 661ce8d5614SIntel return -1; 662ce8d5614SIntel } 66320a0286fSLiu Xiaofeng if (numa_support) { 66420a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 66520a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 66620a0286fSLiu Xiaofeng else { 667b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 66820a0286fSLiu Xiaofeng 66920a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 67020a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 67120a0286fSLiu Xiaofeng port->socket_id = 0; 
67220a0286fSLiu Xiaofeng } 67320a0286fSLiu Xiaofeng } 674b6ea6408SIntel else { 675b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 676af75078fSIntel port->socket_id = 0; 677b6ea6408SIntel else 678b6ea6408SIntel port->socket_id = socket_num; 679b6ea6408SIntel } 680af75078fSIntel } 681af75078fSIntel 6825a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 6835a8fb55cSReshma Pattan if (q == 0) { 6845a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 6855a8fb55cSReshma Pattan return -1; 6865a8fb55cSReshma Pattan } 6875a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 688ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 689ce8d5614SIntel return 0; 690ce8d5614SIntel /* clear the old */ 691ce8d5614SIntel if (fwd_streams != NULL) { 692ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 693ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 694ce8d5614SIntel continue; 695ce8d5614SIntel rte_free(fwd_streams[sm_id]); 696ce8d5614SIntel fwd_streams[sm_id] = NULL; 697af75078fSIntel } 698ce8d5614SIntel rte_free(fwd_streams); 699ce8d5614SIntel fwd_streams = NULL; 700ce8d5614SIntel } 701ce8d5614SIntel 702ce8d5614SIntel /* init new */ 703ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 704ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 705fdf20fa7SSergio Gonzalez Monroy sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 706ce8d5614SIntel if (fwd_streams == NULL) 707ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 708ce8d5614SIntel "failed\n", nb_fwd_streams); 709ce8d5614SIntel 710af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 711af75078fSIntel fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 712fdf20fa7SSergio Gonzalez Monroy sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 713ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 714ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 
715ce8d5614SIntel " failed\n"); 716af75078fSIntel } 717ce8d5614SIntel 718ce8d5614SIntel return 0; 719af75078fSIntel } 720af75078fSIntel 721af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 722af75078fSIntel static void 723af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 724af75078fSIntel { 725af75078fSIntel unsigned int total_burst; 726af75078fSIntel unsigned int nb_burst; 727af75078fSIntel unsigned int burst_stats[3]; 728af75078fSIntel uint16_t pktnb_stats[3]; 729af75078fSIntel uint16_t nb_pkt; 730af75078fSIntel int burst_percent[3]; 731af75078fSIntel 732af75078fSIntel /* 733af75078fSIntel * First compute the total number of packet bursts and the 734af75078fSIntel * two highest numbers of bursts of the same number of packets. 735af75078fSIntel */ 736af75078fSIntel total_burst = 0; 737af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 738af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 739af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 740af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 741af75078fSIntel if (nb_burst == 0) 742af75078fSIntel continue; 743af75078fSIntel total_burst += nb_burst; 744af75078fSIntel if (nb_burst > burst_stats[0]) { 745af75078fSIntel burst_stats[1] = burst_stats[0]; 746af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 747af75078fSIntel burst_stats[0] = nb_burst; 748af75078fSIntel pktnb_stats[0] = nb_pkt; 749af75078fSIntel } 750af75078fSIntel } 751af75078fSIntel if (total_burst == 0) 752af75078fSIntel return; 753af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 754af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 755af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 756af75078fSIntel if (burst_stats[0] == total_burst) { 757af75078fSIntel printf("]\n"); 758af75078fSIntel return; 759af75078fSIntel } 760af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 
761af75078fSIntel printf(" + %d%% of %d pkts]\n", 762af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 763af75078fSIntel return; 764af75078fSIntel } 765af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 766af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 767af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 768af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 769af75078fSIntel return; 770af75078fSIntel } 771af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 772af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 773af75078fSIntel } 774af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 775af75078fSIntel 776af75078fSIntel static void 777af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 778af75078fSIntel { 779af75078fSIntel struct rte_port *port; 780013af9b6SIntel uint8_t i; 781af75078fSIntel 782af75078fSIntel static const char *fwd_stats_border = "----------------------"; 783af75078fSIntel 784af75078fSIntel port = &ports[port_id]; 785af75078fSIntel printf("\n %s Forward statistics for port %-2d %s\n", 786af75078fSIntel fwd_stats_border, port_id, fwd_stats_border); 787013af9b6SIntel 788013af9b6SIntel if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 789af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 790af75078fSIntel "%-"PRIu64"\n", 79170bdb186SIvan Boule stats->ipackets, stats->imissed, 79270bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 793af75078fSIntel 794af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) 795af75078fSIntel printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 796af75078fSIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 79786057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 798f72a0fa6SStephen Hemminger printf(" RX-error: %-"PRIu64"\n", 
stats->ierrors); 79970bdb186SIvan Boule printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 80070bdb186SIvan Boule } 801af75078fSIntel 802af75078fSIntel printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 803af75078fSIntel "%-"PRIu64"\n", 804af75078fSIntel stats->opackets, port->tx_dropped, 805af75078fSIntel (uint64_t) (stats->opackets + port->tx_dropped)); 806013af9b6SIntel } 807013af9b6SIntel else { 808013af9b6SIntel printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 809013af9b6SIntel "%14"PRIu64"\n", 81070bdb186SIvan Boule stats->ipackets, stats->imissed, 81170bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 812013af9b6SIntel 813013af9b6SIntel if (cur_fwd_eng == &csum_fwd_engine) 814013af9b6SIntel printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 815013af9b6SIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 81686057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 817f72a0fa6SStephen Hemminger printf(" RX-error:%"PRIu64"\n", stats->ierrors); 81870bdb186SIvan Boule printf(" RX-nombufs: %14"PRIu64"\n", 81970bdb186SIvan Boule stats->rx_nombuf); 82070bdb186SIvan Boule } 821013af9b6SIntel 822013af9b6SIntel printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 823013af9b6SIntel "%14"PRIu64"\n", 824013af9b6SIntel stats->opackets, port->tx_dropped, 825013af9b6SIntel (uint64_t) (stats->opackets + port->tx_dropped)); 826013af9b6SIntel } 827e659b6b4SIvan Boule 828af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 829af75078fSIntel if (port->rx_stream) 830013af9b6SIntel pkt_burst_stats_display("RX", 831013af9b6SIntel &port->rx_stream->rx_burst_stats); 832af75078fSIntel if (port->tx_stream) 833013af9b6SIntel pkt_burst_stats_display("TX", 834013af9b6SIntel &port->tx_stream->tx_burst_stats); 835af75078fSIntel #endif 836af75078fSIntel 837013af9b6SIntel if (port->rx_queue_stats_mapping_enabled) { 838013af9b6SIntel printf("\n"); 839013af9b6SIntel for (i = 0; i < 
RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 840013af9b6SIntel printf(" Stats reg %2d RX-packets:%14"PRIu64 841013af9b6SIntel " RX-errors:%14"PRIu64 842013af9b6SIntel " RX-bytes:%14"PRIu64"\n", 843013af9b6SIntel i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 844013af9b6SIntel } 845013af9b6SIntel printf("\n"); 846013af9b6SIntel } 847013af9b6SIntel if (port->tx_queue_stats_mapping_enabled) { 848013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 849013af9b6SIntel printf(" Stats reg %2d TX-packets:%14"PRIu64 850013af9b6SIntel " TX-bytes:%14"PRIu64"\n", 851013af9b6SIntel i, stats->q_opackets[i], stats->q_obytes[i]); 852013af9b6SIntel } 853013af9b6SIntel } 854013af9b6SIntel 855af75078fSIntel printf(" %s--------------------------------%s\n", 856af75078fSIntel fwd_stats_border, fwd_stats_border); 857af75078fSIntel } 858af75078fSIntel 859af75078fSIntel static void 860af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 861af75078fSIntel { 862af75078fSIntel struct fwd_stream *fs; 863af75078fSIntel static const char *fwd_top_stats_border = "-------"; 864af75078fSIntel 865af75078fSIntel fs = fwd_streams[stream_id]; 866af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 867af75078fSIntel (fs->fwd_dropped == 0)) 868af75078fSIntel return; 869af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 870af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 871af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 872af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 873af75078fSIntel printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 874af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 875af75078fSIntel 876af75078fSIntel /* if checksum mode */ 877af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 878013af9b6SIntel printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 879013af9b6SIntel "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 880af75078fSIntel } 
881af75078fSIntel 882af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 883af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 884af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 885af75078fSIntel #endif 886af75078fSIntel } 887af75078fSIntel 888af75078fSIntel static void 8897741e4cfSIntel flush_fwd_rx_queues(void) 890af75078fSIntel { 891af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 892af75078fSIntel portid_t rxp; 8937741e4cfSIntel portid_t port_id; 894af75078fSIntel queueid_t rxq; 895af75078fSIntel uint16_t nb_rx; 896af75078fSIntel uint16_t i; 897af75078fSIntel uint8_t j; 898f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 899594302c7SJames Poole uint64_t timer_period; 900f487715fSReshma Pattan 901f487715fSReshma Pattan /* convert to number of cycles */ 902594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 903af75078fSIntel 904af75078fSIntel for (j = 0; j < 2; j++) { 9057741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 906af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 9077741e4cfSIntel port_id = fwd_ports_ids[rxp]; 908f487715fSReshma Pattan /** 909f487715fSReshma Pattan * testpmd can stuck in the below do while loop 910f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero 911f487715fSReshma Pattan * packets. So timer is added to exit this loop 912f487715fSReshma Pattan * after 1sec timer expiry. 
913f487715fSReshma Pattan */ 914f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 915af75078fSIntel do { 9167741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 917013af9b6SIntel pkts_burst, MAX_PKT_BURST); 918af75078fSIntel for (i = 0; i < nb_rx; i++) 919af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 920f487715fSReshma Pattan 921f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 922f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 923f487715fSReshma Pattan timer_tsc += diff_tsc; 924f487715fSReshma Pattan } while ((nb_rx > 0) && 925f487715fSReshma Pattan (timer_tsc < timer_period)); 926f487715fSReshma Pattan timer_tsc = 0; 927af75078fSIntel } 928af75078fSIntel } 929af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 930af75078fSIntel } 931af75078fSIntel } 932af75078fSIntel 933af75078fSIntel static void 934af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 935af75078fSIntel { 936af75078fSIntel struct fwd_stream **fsm; 937af75078fSIntel streamid_t nb_fs; 938af75078fSIntel streamid_t sm_id; 9397e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 9407e4441c8SRemy Horton uint64_t tics_per_1sec; 9417e4441c8SRemy Horton uint64_t tics_datum; 9427e4441c8SRemy Horton uint64_t tics_current; 9437e4441c8SRemy Horton uint8_t idx_port, cnt_ports; 944af75078fSIntel 9457e4441c8SRemy Horton cnt_ports = rte_eth_dev_count(); 9467e4441c8SRemy Horton tics_datum = rte_rdtsc(); 9477e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 9487e4441c8SRemy Horton #endif 949af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 950af75078fSIntel nb_fs = fc->stream_nb; 951af75078fSIntel do { 952af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 953af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 9547e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 9557e4441c8SRemy Horton tics_current = rte_rdtsc(); 9567e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 9577e4441c8SRemy Horton /* Periodic bitrate calculation */ 9587e4441c8SRemy Horton for 
(idx_port = 0; idx_port < cnt_ports; idx_port++) 9597e4441c8SRemy Horton rte_stats_bitrate_calc(bitrate_data, idx_port); 9607e4441c8SRemy Horton tics_datum = tics_current; 9617e4441c8SRemy Horton } 9627e4441c8SRemy Horton #endif 96362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 96462d3216dSReshma Pattan if (latencystats_lcore_id == rte_lcore_id()) 96562d3216dSReshma Pattan rte_latencystats_update(); 96662d3216dSReshma Pattan #endif 96762d3216dSReshma Pattan 968af75078fSIntel } while (! fc->stopped); 969af75078fSIntel } 970af75078fSIntel 971af75078fSIntel static int 972af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 973af75078fSIntel { 974af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 975af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 976af75078fSIntel return 0; 977af75078fSIntel } 978af75078fSIntel 979af75078fSIntel /* 980af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 981af75078fSIntel * Used to start communication flows in network loopback test configurations. 982af75078fSIntel */ 983af75078fSIntel static int 984af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 985af75078fSIntel { 986af75078fSIntel struct fwd_lcore *fwd_lc; 987af75078fSIntel struct fwd_lcore tmp_lcore; 988af75078fSIntel 989af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 990af75078fSIntel tmp_lcore = *fwd_lc; 991af75078fSIntel tmp_lcore.stopped = 1; 992af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 993af75078fSIntel return 0; 994af75078fSIntel } 995af75078fSIntel 996af75078fSIntel /* 997af75078fSIntel * Launch packet forwarding: 998af75078fSIntel * - Setup per-port forwarding context. 999af75078fSIntel * - launch logical cores with their forwarding configuration. 
1000af75078fSIntel */ 1001af75078fSIntel static void 1002af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1003af75078fSIntel { 1004af75078fSIntel port_fwd_begin_t port_fwd_begin; 1005af75078fSIntel unsigned int i; 1006af75078fSIntel unsigned int lc_id; 1007af75078fSIntel int diag; 1008af75078fSIntel 1009af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1010af75078fSIntel if (port_fwd_begin != NULL) { 1011af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1012af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1013af75078fSIntel } 1014af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1015af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1016af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1017af75078fSIntel fwd_lcores[i]->stopped = 0; 1018af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1019af75078fSIntel fwd_lcores[i], lc_id); 1020af75078fSIntel if (diag != 0) 1021af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1022af75078fSIntel lc_id, diag); 1023af75078fSIntel } 1024af75078fSIntel } 1025af75078fSIntel } 1026af75078fSIntel 1027af75078fSIntel /* 1028af75078fSIntel * Launch packet forwarding configuration. 
 */
/*
 * Start a packet forwarding run with the current forwarding engine.
 *
 * Validates that the queue counts are compatible with the engine, that all
 * ports are started and no run is already active, (re)initializes the
 * forwarding streams, checks DCB constraints, optionally flushes stale RX
 * packets, builds and displays the forwarding configuration, zeroes all
 * per-port and per-stream counters, optionally sends `with_tx_first`
 * initial TXONLY bursts, and finally launches the forwarding loop on the
 * forwarding lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* rxonly needs RX queues, txonly needs TX queues, all others both */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a run is already in progress */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode requires every forwarding port in DCB and more than one lcore */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/*
	 * Snapshot current HW stats per port so stop_packet_forwarding() can
	 * report only the deltas of this run.
	 */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/*
	 * Optionally prime the network with `with_tx_first` single TXONLY
	 * bursts before starting the real engine (loopback test setups).
	 */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

/*
 * Stop the current forwarding run: signal every forwarding lcore to stop,
 * wait for them, run the engine's per-port end hook, fold the per-stream
 * counters back into the ports, and print per-stream, per-port and
 * accumulated statistics (deltas since start_packet_forwarding()).
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/*
	 * Per-stream stats are printed only when there is more than one
	 * stream per port; otherwise the stream is linked to its ports so
	 * fwd_port_stats_display() can show the burst spread.
	 */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Accumulate stream counters into the owning ports. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the snapshot taken at start to get run deltas. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

/* Administratively bring the link of port `pid` up; print on failure. */
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

/* Administratively bring the link of port `pid` down; print on failure. */
void
dev_set_link_down(portid_t pid)
{
1283cfae07fdSOuyang Changchun if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) 1284cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1285cfae07fdSOuyang Changchun } 1286cfae07fdSOuyang Changchun 1287ce8d5614SIntel static int 1288ce8d5614SIntel all_ports_started(void) 1289ce8d5614SIntel { 1290ce8d5614SIntel portid_t pi; 1291ce8d5614SIntel struct rte_port *port; 1292ce8d5614SIntel 12937d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1294ce8d5614SIntel port = &ports[pi]; 1295ce8d5614SIntel /* Check if there is a port which is not started */ 129641b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 129741b05095SBernard Iremonger (port->slave_flag == 0)) 1298ce8d5614SIntel return 0; 1299ce8d5614SIntel } 1300ce8d5614SIntel 1301ce8d5614SIntel /* No port is not started */ 1302ce8d5614SIntel return 1; 1303ce8d5614SIntel } 1304ce8d5614SIntel 1305148f963fSBruce Richardson int 1306edab33b1STetsuya Mukawa all_ports_stopped(void) 1307edab33b1STetsuya Mukawa { 1308edab33b1STetsuya Mukawa portid_t pi; 1309edab33b1STetsuya Mukawa struct rte_port *port; 1310edab33b1STetsuya Mukawa 13117d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1312edab33b1STetsuya Mukawa port = &ports[pi]; 131341b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STOPPED) && 131441b05095SBernard Iremonger (port->slave_flag == 0)) 1315edab33b1STetsuya Mukawa return 0; 1316edab33b1STetsuya Mukawa } 1317edab33b1STetsuya Mukawa 1318edab33b1STetsuya Mukawa return 1; 1319edab33b1STetsuya Mukawa } 1320edab33b1STetsuya Mukawa 1321edab33b1STetsuya Mukawa int 1322edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 1323edab33b1STetsuya Mukawa { 1324edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1325edab33b1STetsuya Mukawa return 0; 1326edab33b1STetsuya Mukawa 1327edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 1328edab33b1STetsuya Mukawa return 0; 1329edab33b1STetsuya Mukawa 1330edab33b1STetsuya Mukawa return 1; 
1331edab33b1STetsuya Mukawa } 1332edab33b1STetsuya Mukawa 1333edab33b1STetsuya Mukawa static int 1334edab33b1STetsuya Mukawa port_is_closed(portid_t port_id) 1335edab33b1STetsuya Mukawa { 1336edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1337edab33b1STetsuya Mukawa return 0; 1338edab33b1STetsuya Mukawa 1339edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1340edab33b1STetsuya Mukawa return 0; 1341edab33b1STetsuya Mukawa 1342edab33b1STetsuya Mukawa return 1; 1343edab33b1STetsuya Mukawa } 1344edab33b1STetsuya Mukawa 1345edab33b1STetsuya Mukawa int 1346ce8d5614SIntel start_port(portid_t pid) 1347ce8d5614SIntel { 134892d2703eSMichael Qiu int diag, need_check_link_status = -1; 1349ce8d5614SIntel portid_t pi; 1350ce8d5614SIntel queueid_t qi; 1351ce8d5614SIntel struct rte_port *port; 13522950a769SDeclan Doherty struct ether_addr mac_addr; 1353*76ad4a2dSGaetan Rivet enum rte_eth_event_type event_type; 1354ce8d5614SIntel 13554468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 13564468635fSMichael Qiu return 0; 13574468635fSMichael Qiu 1358ce8d5614SIntel if(dcb_config) 1359ce8d5614SIntel dcb_test = 1; 13607d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1361edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1362ce8d5614SIntel continue; 1363ce8d5614SIntel 136492d2703eSMichael Qiu need_check_link_status = 0; 1365ce8d5614SIntel port = &ports[pi]; 1366ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1367ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1368ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1369ce8d5614SIntel continue; 1370ce8d5614SIntel } 1371ce8d5614SIntel 1372ce8d5614SIntel if (port->need_reconfig > 0) { 1373ce8d5614SIntel port->need_reconfig = 0; 1374ce8d5614SIntel 13755706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 137620a0286fSLiu Xiaofeng port->socket_id); 1377ce8d5614SIntel /* configure port */ 1378ce8d5614SIntel diag = 
rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1379ce8d5614SIntel &(port->dev_conf)); 1380ce8d5614SIntel if (diag != 0) { 1381ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1382ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1383ce8d5614SIntel printf("Port %d can not be set back " 1384ce8d5614SIntel "to stopped\n", pi); 1385ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1386ce8d5614SIntel /* try to reconfigure port next time */ 1387ce8d5614SIntel port->need_reconfig = 1; 1388148f963fSBruce Richardson return -1; 1389ce8d5614SIntel } 1390ce8d5614SIntel } 1391ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1392ce8d5614SIntel port->need_reconfig_queues = 0; 1393ce8d5614SIntel /* setup tx queues */ 1394ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1395b6ea6408SIntel if ((numa_support) && 1396b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1397b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1398b6ea6408SIntel nb_txd,txring_numa[pi], 1399b6ea6408SIntel &(port->tx_conf)); 1400b6ea6408SIntel else 1401b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1402b6ea6408SIntel nb_txd,port->socket_id, 1403b6ea6408SIntel &(port->tx_conf)); 1404b6ea6408SIntel 1405ce8d5614SIntel if (diag == 0) 1406ce8d5614SIntel continue; 1407ce8d5614SIntel 1408ce8d5614SIntel /* Fail to setup tx queue, return */ 1409ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1410ce8d5614SIntel RTE_PORT_HANDLING, 1411ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1412ce8d5614SIntel printf("Port %d can not be set back " 1413ce8d5614SIntel "to stopped\n", pi); 1414ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1415ce8d5614SIntel /* try to reconfigure queues next time */ 1416ce8d5614SIntel port->need_reconfig_queues = 1; 1417148f963fSBruce Richardson return -1; 1418ce8d5614SIntel } 1419ce8d5614SIntel /* setup rx queues */ 1420ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1421b6ea6408SIntel if ((numa_support) && 1422b6ea6408SIntel 
(rxring_numa[pi] != NUMA_NO_CONFIG)) { 1423b6ea6408SIntel struct rte_mempool * mp = 1424b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1425b6ea6408SIntel if (mp == NULL) { 1426b6ea6408SIntel printf("Failed to setup RX queue:" 1427b6ea6408SIntel "No mempool allocation" 1428b6ea6408SIntel " on the socket %d\n", 1429b6ea6408SIntel rxring_numa[pi]); 1430148f963fSBruce Richardson return -1; 1431b6ea6408SIntel } 1432b6ea6408SIntel 1433b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1434b6ea6408SIntel nb_rxd,rxring_numa[pi], 1435b6ea6408SIntel &(port->rx_conf),mp); 14361e1d6bddSBernard Iremonger } else { 14371e1d6bddSBernard Iremonger struct rte_mempool *mp = 14381e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 14391e1d6bddSBernard Iremonger if (mp == NULL) { 14401e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 14411e1d6bddSBernard Iremonger "No mempool allocation" 14421e1d6bddSBernard Iremonger " on the socket %d\n", 14431e1d6bddSBernard Iremonger port->socket_id); 14441e1d6bddSBernard Iremonger return -1; 1445b6ea6408SIntel } 1446b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1447b6ea6408SIntel nb_rxd,port->socket_id, 14481e1d6bddSBernard Iremonger &(port->rx_conf), mp); 14491e1d6bddSBernard Iremonger } 1450ce8d5614SIntel if (diag == 0) 1451ce8d5614SIntel continue; 1452ce8d5614SIntel 1453ce8d5614SIntel /* Fail to setup rx queue, return */ 1454ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1455ce8d5614SIntel RTE_PORT_HANDLING, 1456ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1457ce8d5614SIntel printf("Port %d can not be set back " 1458ce8d5614SIntel "to stopped\n", pi); 1459ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1460ce8d5614SIntel /* try to reconfigure queues next time */ 1461ce8d5614SIntel port->need_reconfig_queues = 1; 1462148f963fSBruce Richardson return -1; 1463ce8d5614SIntel } 1464ce8d5614SIntel } 1465*76ad4a2dSGaetan Rivet 1466*76ad4a2dSGaetan Rivet for (event_type = RTE_ETH_EVENT_UNKNOWN; 
1467*76ad4a2dSGaetan Rivet event_type < RTE_ETH_EVENT_MAX; 1468*76ad4a2dSGaetan Rivet event_type++) { 1469*76ad4a2dSGaetan Rivet diag = rte_eth_dev_callback_register(pi, 1470*76ad4a2dSGaetan Rivet event_type, 1471*76ad4a2dSGaetan Rivet eth_event_callback, 1472*76ad4a2dSGaetan Rivet NULL); 1473*76ad4a2dSGaetan Rivet if (diag) { 1474*76ad4a2dSGaetan Rivet printf("Failed to setup even callback for event %d\n", 1475*76ad4a2dSGaetan Rivet event_type); 1476*76ad4a2dSGaetan Rivet return -1; 1477*76ad4a2dSGaetan Rivet } 1478*76ad4a2dSGaetan Rivet } 1479*76ad4a2dSGaetan Rivet 1480ce8d5614SIntel /* start port */ 1481ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1482ce8d5614SIntel printf("Fail to start port %d\n", pi); 1483ce8d5614SIntel 1484ce8d5614SIntel /* Fail to setup rx queue, return */ 1485ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1486ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1487ce8d5614SIntel printf("Port %d can not be set back to " 1488ce8d5614SIntel "stopped\n", pi); 1489ce8d5614SIntel continue; 1490ce8d5614SIntel } 1491ce8d5614SIntel 1492ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1493ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1494ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1495ce8d5614SIntel 14962950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1497d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 14982950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 14992950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 15002950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1501d8c89163SZijie Pan 1502ce8d5614SIntel /* at least one port started, need checking link status */ 1503ce8d5614SIntel need_check_link_status = 1; 1504ce8d5614SIntel } 1505ce8d5614SIntel 150692d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1507edab33b1STetsuya Mukawa 
check_all_ports_link_status(RTE_PORT_ALL); 150892d2703eSMichael Qiu else if (need_check_link_status == 0) 1509ce8d5614SIntel printf("Please stop the ports first\n"); 1510ce8d5614SIntel 1511ce8d5614SIntel printf("Done\n"); 1512148f963fSBruce Richardson return 0; 1513ce8d5614SIntel } 1514ce8d5614SIntel 1515ce8d5614SIntel void 1516ce8d5614SIntel stop_port(portid_t pid) 1517ce8d5614SIntel { 1518ce8d5614SIntel portid_t pi; 1519ce8d5614SIntel struct rte_port *port; 1520ce8d5614SIntel int need_check_link_status = 0; 1521ce8d5614SIntel 1522ce8d5614SIntel if (dcb_test) { 1523ce8d5614SIntel dcb_test = 0; 1524ce8d5614SIntel dcb_config = 0; 1525ce8d5614SIntel } 15264468635fSMichael Qiu 15274468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 15284468635fSMichael Qiu return; 15294468635fSMichael Qiu 1530ce8d5614SIntel printf("Stopping ports...\n"); 1531ce8d5614SIntel 15327d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 15334468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1534ce8d5614SIntel continue; 1535ce8d5614SIntel 1536a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1537a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1538a8ef3e3aSBernard Iremonger continue; 1539a8ef3e3aSBernard Iremonger } 1540a8ef3e3aSBernard Iremonger 15410e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 15420e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 15430e545d30SBernard Iremonger continue; 15440e545d30SBernard Iremonger } 15450e545d30SBernard Iremonger 1546ce8d5614SIntel port = &ports[pi]; 1547ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1548ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1549ce8d5614SIntel continue; 1550ce8d5614SIntel 1551ce8d5614SIntel rte_eth_dev_stop(pi); 1552ce8d5614SIntel 1553ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1554ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 
1555ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1556ce8d5614SIntel need_check_link_status = 1; 1557ce8d5614SIntel } 1558bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1559edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1560ce8d5614SIntel 1561ce8d5614SIntel printf("Done\n"); 1562ce8d5614SIntel } 1563ce8d5614SIntel 1564ce8d5614SIntel void 1565ce8d5614SIntel close_port(portid_t pid) 1566ce8d5614SIntel { 1567ce8d5614SIntel portid_t pi; 1568ce8d5614SIntel struct rte_port *port; 1569ce8d5614SIntel 15704468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 15714468635fSMichael Qiu return; 15724468635fSMichael Qiu 1573ce8d5614SIntel printf("Closing ports...\n"); 1574ce8d5614SIntel 15757d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 15764468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1577ce8d5614SIntel continue; 1578ce8d5614SIntel 1579a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1580a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1581a8ef3e3aSBernard Iremonger continue; 1582a8ef3e3aSBernard Iremonger } 1583a8ef3e3aSBernard Iremonger 15840e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 15850e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 15860e545d30SBernard Iremonger continue; 15870e545d30SBernard Iremonger } 15880e545d30SBernard Iremonger 1589ce8d5614SIntel port = &ports[pi]; 1590ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1591d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1592d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1593d4e8ad64SMichael Qiu continue; 1594d4e8ad64SMichael Qiu } 1595d4e8ad64SMichael Qiu 1596d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1597ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1598ce8d5614SIntel printf("Port %d is now not stopped\n", 
pi); 1599ce8d5614SIntel continue; 1600ce8d5614SIntel } 1601ce8d5614SIntel 1602938a184aSAdrien Mazarguil if (port->flow_list) 1603938a184aSAdrien Mazarguil port_flow_flush(pi); 1604ce8d5614SIntel rte_eth_dev_close(pi); 1605ce8d5614SIntel 1606ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1607ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1608b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1609ce8d5614SIntel } 1610ce8d5614SIntel 1611ce8d5614SIntel printf("Done\n"); 1612ce8d5614SIntel } 1613ce8d5614SIntel 1614edab33b1STetsuya Mukawa void 1615edab33b1STetsuya Mukawa attach_port(char *identifier) 1616ce8d5614SIntel { 1617ebf5e9b7SBernard Iremonger portid_t pi = 0; 1618931126baSBernard Iremonger unsigned int socket_id; 1619ce8d5614SIntel 1620edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1621edab33b1STetsuya Mukawa 1622edab33b1STetsuya Mukawa if (identifier == NULL) { 1623edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1624edab33b1STetsuya Mukawa return; 1625ce8d5614SIntel } 1626ce8d5614SIntel 1627edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1628edab33b1STetsuya Mukawa return; 1629edab33b1STetsuya Mukawa 1630931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1631931126baSBernard Iremonger /* if socket_id is invalid, set to 0 */ 1632931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 1633931126baSBernard Iremonger socket_id = 0; 1634931126baSBernard Iremonger reconfig(pi, socket_id); 1635edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1636edab33b1STetsuya Mukawa 1637edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1638edab33b1STetsuya Mukawa 1639edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1640edab33b1STetsuya Mukawa 1641edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 1642edab33b1STetsuya Mukawa printf("Done\n"); 1643edab33b1STetsuya Mukawa } 1644edab33b1STetsuya Mukawa 1645edab33b1STetsuya Mukawa void 1646edab33b1STetsuya Mukawa detach_port(uint8_t port_id) 16475f4ec54fSChen Jing D(Mark) { 1648edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 16495f4ec54fSChen Jing D(Mark) 1650edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 16515f4ec54fSChen Jing D(Mark) 1652edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1653edab33b1STetsuya Mukawa printf("Please close port first\n"); 1654edab33b1STetsuya Mukawa return; 1655edab33b1STetsuya Mukawa } 1656edab33b1STetsuya Mukawa 1657938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 1658938a184aSAdrien Mazarguil port_flow_flush(port_id); 1659938a184aSAdrien Mazarguil 1660edab33b1STetsuya Mukawa if (rte_eth_dev_detach(port_id, name)) 1661edab33b1STetsuya Mukawa return; 1662edab33b1STetsuya Mukawa 1663edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1664edab33b1STetsuya Mukawa 1665edab33b1STetsuya Mukawa printf("Port '%s' is detached. 
Now total ports is %d\n", 1666edab33b1STetsuya Mukawa name, nb_ports); 1667edab33b1STetsuya Mukawa printf("Done\n"); 1668edab33b1STetsuya Mukawa return; 16695f4ec54fSChen Jing D(Mark) } 16705f4ec54fSChen Jing D(Mark) 1671af75078fSIntel void 1672af75078fSIntel pmd_test_exit(void) 1673af75078fSIntel { 1674af75078fSIntel portid_t pt_id; 1675af75078fSIntel 16768210ec25SPablo de Lara if (test_done == 0) 16778210ec25SPablo de Lara stop_packet_forwarding(); 16788210ec25SPablo de Lara 1679d3a274ceSZhihong Wang if (ports != NULL) { 1680d3a274ceSZhihong Wang no_link_check = 1; 16817d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 1682d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1683af75078fSIntel fflush(stdout); 1684d3a274ceSZhihong Wang stop_port(pt_id); 1685d3a274ceSZhihong Wang close_port(pt_id); 1686af75078fSIntel } 1687d3a274ceSZhihong Wang } 1688d3a274ceSZhihong Wang printf("\nBye...\n"); 1689af75078fSIntel } 1690af75078fSIntel 1691af75078fSIntel typedef void (*cmd_func_t)(void); 1692af75078fSIntel struct pmd_test_command { 1693af75078fSIntel const char *cmd_name; 1694af75078fSIntel cmd_func_t cmd_func; 1695af75078fSIntel }; 1696af75078fSIntel 1697af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1698af75078fSIntel 1699ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1700af75078fSIntel static void 1701edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 1702af75078fSIntel { 1703ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1704ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1705ce8d5614SIntel uint8_t portid, count, all_ports_up, print_flag = 0; 1706ce8d5614SIntel struct rte_eth_link link; 1707ce8d5614SIntel 1708ce8d5614SIntel printf("Checking link statuses...\n"); 1709ce8d5614SIntel fflush(stdout); 1710ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1711ce8d5614SIntel all_ports_up = 1; 
17127d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 1713ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1714ce8d5614SIntel continue; 1715ce8d5614SIntel memset(&link, 0, sizeof(link)); 1716ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1717ce8d5614SIntel /* print link status if flag set */ 1718ce8d5614SIntel if (print_flag == 1) { 1719ce8d5614SIntel if (link.link_status) 1720ce8d5614SIntel printf("Port %d Link Up - speed %u " 1721ce8d5614SIntel "Mbps - %s\n", (uint8_t)portid, 1722ce8d5614SIntel (unsigned)link.link_speed, 1723ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 1724ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 1725ce8d5614SIntel else 1726ce8d5614SIntel printf("Port %d Link Down\n", 1727ce8d5614SIntel (uint8_t)portid); 1728ce8d5614SIntel continue; 1729ce8d5614SIntel } 1730ce8d5614SIntel /* clear all_ports_up flag if any link down */ 173109419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 1732ce8d5614SIntel all_ports_up = 0; 1733ce8d5614SIntel break; 1734ce8d5614SIntel } 1735ce8d5614SIntel } 1736ce8d5614SIntel /* after finally printing all link status, get out */ 1737ce8d5614SIntel if (print_flag == 1) 1738ce8d5614SIntel break; 1739ce8d5614SIntel 1740ce8d5614SIntel if (all_ports_up == 0) { 1741ce8d5614SIntel fflush(stdout); 1742ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1743ce8d5614SIntel } 1744ce8d5614SIntel 1745ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1746ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1747ce8d5614SIntel print_flag = 1; 1748ce8d5614SIntel } 1749ce8d5614SIntel } 1750af75078fSIntel } 1751af75078fSIntel 1752*76ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 1753*76ad4a2dSGaetan Rivet static void 1754*76ad4a2dSGaetan Rivet eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param) 1755*76ad4a2dSGaetan Rivet { 1756*76ad4a2dSGaetan Rivet static const char * const event_desc[] = { 1757*76ad4a2dSGaetan 
Rivet [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 1758*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_LSC] = "LSC", 1759*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 1760*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 1761*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 1762*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MACSEC] = "MACsec", 1763*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 1764*76ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 1765*76ad4a2dSGaetan Rivet }; 1766*76ad4a2dSGaetan Rivet 1767*76ad4a2dSGaetan Rivet RTE_SET_USED(param); 1768*76ad4a2dSGaetan Rivet 1769*76ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 1770*76ad4a2dSGaetan Rivet fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 1771*76ad4a2dSGaetan Rivet port_id, __func__, type); 1772*76ad4a2dSGaetan Rivet fflush(stderr); 1773*76ad4a2dSGaetan Rivet } else { 1774*76ad4a2dSGaetan Rivet printf("\nPort %" PRIu8 ": %s event\n", port_id, 1775*76ad4a2dSGaetan Rivet event_desc[type]); 1776*76ad4a2dSGaetan Rivet fflush(stdout); 1777*76ad4a2dSGaetan Rivet } 1778*76ad4a2dSGaetan Rivet } 1779*76ad4a2dSGaetan Rivet 1780013af9b6SIntel static int 1781013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1782af75078fSIntel { 1783013af9b6SIntel uint16_t i; 1784af75078fSIntel int diag; 1785013af9b6SIntel uint8_t mapping_found = 0; 1786af75078fSIntel 1787013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1788013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1789013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1790013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1791013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1792013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1793013af9b6SIntel if (diag != 0) 1794013af9b6SIntel return diag; 1795013af9b6SIntel mapping_found = 1; 1796af75078fSIntel } 
1797013af9b6SIntel } 1798013af9b6SIntel if (mapping_found) 1799013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1800013af9b6SIntel return 0; 1801013af9b6SIntel } 1802013af9b6SIntel 1803013af9b6SIntel static int 1804013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port) 1805013af9b6SIntel { 1806013af9b6SIntel uint16_t i; 1807013af9b6SIntel int diag; 1808013af9b6SIntel uint8_t mapping_found = 0; 1809013af9b6SIntel 1810013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1811013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1812013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1813013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1814013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1815013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1816013af9b6SIntel if (diag != 0) 1817013af9b6SIntel return diag; 1818013af9b6SIntel mapping_found = 1; 1819013af9b6SIntel } 1820013af9b6SIntel } 1821013af9b6SIntel if (mapping_found) 1822013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 1823013af9b6SIntel return 0; 1824013af9b6SIntel } 1825013af9b6SIntel 1826013af9b6SIntel static void 1827013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port) 1828013af9b6SIntel { 1829013af9b6SIntel int diag = 0; 1830013af9b6SIntel 1831013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1832af75078fSIntel if (diag != 0) { 1833013af9b6SIntel if (diag == -ENOTSUP) { 1834013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 1835013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 1836013af9b6SIntel } 1837013af9b6SIntel else 1838013af9b6SIntel rte_exit(EXIT_FAILURE, 1839013af9b6SIntel "set_tx_queue_stats_mapping_registers " 1840013af9b6SIntel "failed for port id=%d diag=%d\n", 1841af75078fSIntel pi, diag); 1842af75078fSIntel } 1843013af9b6SIntel 1844013af9b6SIntel diag = 
set_rx_queue_stats_mapping_registers(pi, port); 1845af75078fSIntel if (diag != 0) { 1846013af9b6SIntel if (diag == -ENOTSUP) { 1847013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 1848013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 1849013af9b6SIntel } 1850013af9b6SIntel else 1851013af9b6SIntel rte_exit(EXIT_FAILURE, 1852013af9b6SIntel "set_rx_queue_stats_mapping_registers " 1853013af9b6SIntel "failed for port id=%d diag=%d\n", 1854af75078fSIntel pi, diag); 1855af75078fSIntel } 1856af75078fSIntel } 1857af75078fSIntel 1858f2c5125aSPablo de Lara static void 1859f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 1860f2c5125aSPablo de Lara { 1861f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 1862f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 1863f2c5125aSPablo de Lara 1864f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 1865f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 1866f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 1867f2c5125aSPablo de Lara 1868f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 1869f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 1870f2c5125aSPablo de Lara 1871f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 1872f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 1873f2c5125aSPablo de Lara 1874f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 1875f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 1876f2c5125aSPablo de Lara 1877f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 1878f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 1879f2c5125aSPablo de Lara 1880f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 1881f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 1882f2c5125aSPablo de Lara 1883f2c5125aSPablo de Lara if (tx_hthresh != 
RTE_PMD_PARAM_UNSET) 1884f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 1885f2c5125aSPablo de Lara 1886f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 1887f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 1888f2c5125aSPablo de Lara 1889f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 1890f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 1891f2c5125aSPablo de Lara 1892f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 1893f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 1894f2c5125aSPablo de Lara 1895f2c5125aSPablo de Lara if (txq_flags != RTE_PMD_PARAM_UNSET) 1896f2c5125aSPablo de Lara port->tx_conf.txq_flags = txq_flags; 1897f2c5125aSPablo de Lara } 1898f2c5125aSPablo de Lara 1899013af9b6SIntel void 1900013af9b6SIntel init_port_config(void) 1901013af9b6SIntel { 1902013af9b6SIntel portid_t pid; 1903013af9b6SIntel struct rte_port *port; 1904013af9b6SIntel 19057d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1906013af9b6SIntel port = &ports[pid]; 1907013af9b6SIntel port->dev_conf.rxmode = rx_mode; 1908013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 19093ce690d3SBruce Richardson if (nb_rxq > 1) { 1910013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1911013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 1912af75078fSIntel } else { 1913013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 1914013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 1915af75078fSIntel } 19163ce690d3SBruce Richardson 19175f592039SJingjing Wu if (port->dcb_flag == 0) { 19183ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 19193ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 19203ce690d3SBruce Richardson else 19213ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 19223ce690d3SBruce Richardson } 19233ce690d3SBruce Richardson 
1924f2c5125aSPablo de Lara rxtx_port_config(port); 1925013af9b6SIntel 1926013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 1927013af9b6SIntel 1928013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 19297b7e5ba7SIntel #ifdef RTE_NIC_BYPASS 19307b7e5ba7SIntel rte_eth_dev_bypass_init(pid); 19317b7e5ba7SIntel #endif 1932013af9b6SIntel } 1933013af9b6SIntel } 1934013af9b6SIntel 193541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 193641b05095SBernard Iremonger { 193741b05095SBernard Iremonger struct rte_port *port; 193841b05095SBernard Iremonger 193941b05095SBernard Iremonger port = &ports[slave_pid]; 194041b05095SBernard Iremonger port->slave_flag = 1; 194141b05095SBernard Iremonger } 194241b05095SBernard Iremonger 194341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 194441b05095SBernard Iremonger { 194541b05095SBernard Iremonger struct rte_port *port; 194641b05095SBernard Iremonger 194741b05095SBernard Iremonger port = &ports[slave_pid]; 194841b05095SBernard Iremonger port->slave_flag = 0; 194941b05095SBernard Iremonger } 195041b05095SBernard Iremonger 19510e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 19520e545d30SBernard Iremonger { 19530e545d30SBernard Iremonger struct rte_port *port; 19540e545d30SBernard Iremonger 19550e545d30SBernard Iremonger port = &ports[slave_pid]; 19560e545d30SBernard Iremonger return port->slave_flag; 19570e545d30SBernard Iremonger } 19580e545d30SBernard Iremonger 1959013af9b6SIntel const uint16_t vlan_tags[] = { 1960013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1961013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1962013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1963013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 1964013af9b6SIntel }; 1965013af9b6SIntel 1966013af9b6SIntel static int 19671a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 19681a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 19691a572499SJingjing Wu enum rte_eth_nb_tcs 
num_tcs, 19701a572499SJingjing Wu uint8_t pfc_en) 1971013af9b6SIntel { 1972013af9b6SIntel uint8_t i; 1973af75078fSIntel 1974af75078fSIntel /* 1975013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 1976013af9b6SIntel * given above, and the number of traffic classes available for use. 1977af75078fSIntel */ 19781a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 19791a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 19801a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 19811a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 19821a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1983013af9b6SIntel 1984547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 19851a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 19861a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 19871a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 19881a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 19891a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 19901a572499SJingjing Wu (num_tcs == ETH_4_TCS ? 
ETH_32_POOLS : ETH_16_POOLS); 1991013af9b6SIntel 19921a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 19931a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 19941a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 19951a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 19961a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 1997af75078fSIntel } 1998013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 19991a572499SJingjing Wu vmdq_rx_conf->dcb_tc[i] = i; 20001a572499SJingjing Wu vmdq_tx_conf->dcb_tc[i] = i; 2001013af9b6SIntel } 2002013af9b6SIntel 2003013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 200432e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 200532e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 20061a572499SJingjing Wu } else { 20071a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 20081a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 20091a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 20101a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2011013af9b6SIntel 20121a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 20131a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 20141a572499SJingjing Wu 2015bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2016bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2017bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2018013af9b6SIntel } 20191a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 20201a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 202132e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 20221a572499SJingjing Wu } 20231a572499SJingjing Wu 20241a572499SJingjing Wu if (pfc_en) 20251a572499SJingjing Wu eth_conf->dcb_capability_en = 20261a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2027013af9b6SIntel else 2028013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 
2029013af9b6SIntel 2030013af9b6SIntel return 0; 2031013af9b6SIntel } 2032013af9b6SIntel 2033013af9b6SIntel int 20341a572499SJingjing Wu init_port_dcb_config(portid_t pid, 20351a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 20361a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 20371a572499SJingjing Wu uint8_t pfc_en) 2038013af9b6SIntel { 2039013af9b6SIntel struct rte_eth_conf port_conf; 2040013af9b6SIntel struct rte_port *rte_port; 2041013af9b6SIntel int retval; 2042013af9b6SIntel uint16_t i; 2043013af9b6SIntel 20442a977b89SWenzhuo Lu rte_port = &ports[pid]; 2045013af9b6SIntel 2046013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2047013af9b6SIntel /* Enter DCB configuration status */ 2048013af9b6SIntel dcb_config = 1; 2049013af9b6SIntel 2050013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 20511a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2052013af9b6SIntel if (retval < 0) 2053013af9b6SIntel return retval; 20542a977b89SWenzhuo Lu port_conf.rxmode.hw_vlan_filter = 1; 2055013af9b6SIntel 20562a977b89SWenzhuo Lu /** 20572a977b89SWenzhuo Lu * Write the configuration into the device. 20582a977b89SWenzhuo Lu * Set the numbers of RX & TX queues to 0, so 20592a977b89SWenzhuo Lu * the RX & TX queues will not be setup. 20602a977b89SWenzhuo Lu */ 20612a977b89SWenzhuo Lu (void)rte_eth_dev_configure(pid, 0, 0, &port_conf); 20622a977b89SWenzhuo Lu 20632a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 20642a977b89SWenzhuo Lu 20652a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 20662a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 
 */
	/* NOTE(review): this is the tail of a DCB port-configuration
	 * function whose header lies above this chunk (not visible here).
	 * It selects queue counts for DCB mode and applies the prepared
	 * port_conf to the port.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		/* With VT enabled, VF presence decides whether the currently
		 * configured queue counts or the device maximums are used.
		 */
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* One queue per traffic class when VMDq pools exist. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	/* Install the DCB-enabled configuration built earlier in this
	 * function into the port's device configuration.
	 */
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	/* Mark the port as running in DCB mode for the rest of testpmd. */
	rte_port->dcb_flag = 1;

	return 0;
}

/*
 * Allocate the zero-initialized, cache-aligned array of per-port state
 * structures (one slot per possible ethdev port).  Aborts the process on
 * allocation failure since testpmd cannot run without it.
 */
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}

/*
 * Tear down forwarding/ports and terminate the interactive prompt.
 * Used on SIGINT/SIGTERM to shut down cleanly before re-raising the signal.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

/*
 * SIGINT/SIGTERM handler: uninitialize optional subsystems, run the normal
 * shutdown path, then restore the default disposition and re-raise the
 * signal so the process exits with the status expected for that signal.
 *
 * NOTE(review): printf() and the uninit calls below are not
 * async-signal-safe; this matches upstream testpmd behavior — confirm
 * before relying on it in other contexts.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

/*
 * testpmd entry point: initialize EAL and optional subsystems, parse
 * application arguments, configure and start all ports, then either run
 * the interactive command prompt or start packet forwarding directly.
 * Returns 0 on normal exit, 1 if reading from stdin failed in
 * non-interactive mode.
 */
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	/* Install handlers before EAL init so a signal during startup
	 * still triggers a clean shutdown.
	 */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip past the EAL arguments consumed above; anything left is
	 * application-level options.
	 */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	/* Multiple RX queues imply RSS, which needs enough TX queues to be
	 * exercised fully.
	 */
	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_data = rte_stats_bitrate_create();
	if (bitrate_data == NULL)
		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
	rte_stats_bitrate_reg(bitrate_data);
#endif


#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until the user presses
		 * enter (or stdin closes), then shut down.
		 */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}