1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2174a1631SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3af75078fSIntel */ 4af75078fSIntel 5af75078fSIntel #include <stdarg.h> 6af75078fSIntel #include <stdio.h> 7af75078fSIntel #include <stdlib.h> 8af75078fSIntel #include <signal.h> 9af75078fSIntel #include <string.h> 10af75078fSIntel #include <time.h> 11af75078fSIntel #include <fcntl.h> 121c036b16SEelco Chaudron #include <sys/mman.h> 13af75078fSIntel #include <sys/types.h> 14af75078fSIntel #include <errno.h> 15af75078fSIntel 16af75078fSIntel #include <sys/queue.h> 17af75078fSIntel #include <sys/stat.h> 18af75078fSIntel 19af75078fSIntel #include <stdint.h> 20af75078fSIntel #include <unistd.h> 21af75078fSIntel #include <inttypes.h> 22af75078fSIntel 23af75078fSIntel #include <rte_common.h> 24d1eb542eSOlivier Matz #include <rte_errno.h> 25af75078fSIntel #include <rte_byteorder.h> 26af75078fSIntel #include <rte_log.h> 27af75078fSIntel #include <rte_debug.h> 28af75078fSIntel #include <rte_cycles.h> 29af75078fSIntel #include <rte_memory.h> 30af75078fSIntel #include <rte_memcpy.h> 31af75078fSIntel #include <rte_launch.h> 32af75078fSIntel #include <rte_eal.h> 33284c908cSGaetan Rivet #include <rte_alarm.h> 34af75078fSIntel #include <rte_per_lcore.h> 35af75078fSIntel #include <rte_lcore.h> 36af75078fSIntel #include <rte_atomic.h> 37af75078fSIntel #include <rte_branch_prediction.h> 38af75078fSIntel #include <rte_mempool.h> 39af75078fSIntel #include <rte_malloc.h> 40af75078fSIntel #include <rte_mbuf.h> 41af75078fSIntel #include <rte_interrupts.h> 42af75078fSIntel #include <rte_pci.h> 43af75078fSIntel #include <rte_ether.h> 44af75078fSIntel #include <rte_ethdev.h> 45edab33b1STetsuya Mukawa #include <rte_dev.h> 46af75078fSIntel #include <rte_string_fns.h> 47e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD 48e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h> 49e261265eSRadu Nicolau #endif 50102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 51102b7329SReshma Pattan #include <rte_pdump.h> 52102b7329SReshma Pattan #endif 53938a184aSAdrien Mazarguil #include <rte_flow.h> 547e4441c8SRemy Horton #include <rte_metrics.h> 557e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 567e4441c8SRemy Horton #include <rte_bitrate.h> 577e4441c8SRemy Horton #endif 5862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 5962d3216dSReshma Pattan #include <rte_latencystats.h> 6062d3216dSReshma Pattan #endif 61af75078fSIntel 62af75078fSIntel #include "testpmd.h" 63af75078fSIntel 64af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 65285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */ 66af75078fSIntel 67af75078fSIntel /* use master core for command line ? */ 68af75078fSIntel uint8_t interactive = 0; 69ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 7099cabef0SPablo de Lara uint8_t tx_first; 7181ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0}; 72af75078fSIntel 73af75078fSIntel /* 74af75078fSIntel * NUMA support configuration. 75af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 76af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 77af75078fSIntel * probed ports among the CPU sockets 0 and 1. 78af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 
79af75078fSIntel */
80999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
81af75078fSIntel
82af75078fSIntel /*
83b6ea6408SIntel * In UMA mode, all memory is allocated from socket 0 if --socket-num is
84b6ea6408SIntel * not configured.
85b6ea6408SIntel */
86b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
87b6ea6408SIntel
88b6ea6408SIntel /*
89148f963fSBruce Richardson * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
90148f963fSBruce Richardson */
91148f963fSBruce Richardson uint8_t mp_anon = 0;
92148f963fSBruce Richardson
93148f963fSBruce Richardson /*
94af75078fSIntel * Record the Ethernet address of peer target ports to which packets are
95af75078fSIntel * forwarded.
96547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator
97af75078fSIntel * ports.
98af75078fSIntel */
99af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100af75078fSIntel portid_t nb_peer_eth_addrs = 0;
101af75078fSIntel
102af75078fSIntel /*
103af75078fSIntel * Probed Target Environment.
104af75078fSIntel */
105af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */
106af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */
107af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */
109af75078fSIntel
110af75078fSIntel /*
111af75078fSIntel * Test Forwarding Configuration.
112af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports
114af75078fSIntel */
115af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */
118af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */
119af75078fSIntel
120af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
122af75078fSIntel
123af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
125af75078fSIntel
126af75078fSIntel /*
127af75078fSIntel * Forwarding engines.
128af75078fSIntel */
129af75078fSIntel struct fwd_engine * fwd_engines[] = {
130af75078fSIntel &io_fwd_engine,
131af75078fSIntel &mac_fwd_engine,
132d47388f1SCyril Chemparathy &mac_swap_engine,
133e9e23a61SCyril Chemparathy &flow_gen_engine,
134af75078fSIntel &rx_only_engine,
135af75078fSIntel &tx_only_engine,
136af75078fSIntel &csum_fwd_engine,
137168dfa61SIvan Boule &icmp_echo_engine,
1385b590fbeSJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
1395b590fbeSJasvinder Singh &softnic_tm_engine,
1405b590fbeSJasvinder Singh &softnic_tm_bypass_engine,
1415b590fbeSJasvinder Singh #endif
142af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
143af75078fSIntel &ieee1588_fwd_engine,
144af75078fSIntel #endif
145af75078fSIntel NULL,
146af75078fSIntel };
147af75078fSIntel
148af75078fSIntel struct fwd_config cur_fwd_config;
149af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default.
*/
150bf56fce1SZhihong Wang uint32_t retry_enabled;
151bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153af75078fSIntel
154af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
156c8798818SIntel * specified on command-line. */
157cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158d9a191a0SPhil Yang
159d9a191a0SPhil Yang /*
160d9a191a0SPhil Yang * In a container, the process running with the 'stats-period' option cannot be
161d9a191a0SPhil Yang * terminated. Set a flag to exit the stats-period loop after receiving SIGINT/SIGTERM.
162d9a191a0SPhil Yang */
163d9a191a0SPhil Yang uint8_t f_quit;
164d9a191a0SPhil Yang
165af75078fSIntel /*
166af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine.
167af75078fSIntel */
168af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170af75078fSIntel TXONLY_DEF_PACKET_LEN,
171af75078fSIntel };
172af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173af75078fSIntel
17479bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
17579bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
17679bec05bSKonstantin Ananyev
177af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179af75078fSIntel
180900550deSIntel /* current configuration is in DCB or not, 0 means it is not in DCB mode */
181900550deSIntel uint8_t dcb_config = 0;
182900550deSIntel
183900550deSIntel /* Whether the DCB is in testing status */
184900550deSIntel uint8_t dcb_test = 0;
185900550deSIntel
186af75078fSIntel /*
187af75078fSIntel * Configurable number of RX/TX queues.
188af75078fSIntel */
189af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191af75078fSIntel
192af75078fSIntel /*
193af75078fSIntel * Configurable number of RX/TX ring descriptors.
194af75078fSIntel */
195af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128
196af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512
197af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199af75078fSIntel
200f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
201af75078fSIntel /*
202af75078fSIntel * Configurable values of RX and TX ring threshold registers.
203af75078fSIntel */
204af75078fSIntel
205f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208af75078fSIntel
209f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212af75078fSIntel
213af75078fSIntel /*
214af75078fSIntel * Configurable value of RX free threshold.
215af75078fSIntel */
216f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217af75078fSIntel
218af75078fSIntel /*
219ce8d5614SIntel * Configurable value of RX drop enable.
220ce8d5614SIntel */
221f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222ce8d5614SIntel
223ce8d5614SIntel /*
224af75078fSIntel * Configurable value of TX free threshold.
225af75078fSIntel */
226f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227af75078fSIntel
228af75078fSIntel /*
229af75078fSIntel * Configurable value of TX RS bit threshold.
230af75078fSIntel */
231f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232af75078fSIntel
233af75078fSIntel /*
234ce8d5614SIntel * Configurable value of TX queue flags.
235ce8d5614SIntel */
236f2c5125aSPablo de Lara int32_t txq_flags = RTE_PMD_PARAM_UNSET;
237ce8d5614SIntel
238ce8d5614SIntel /*
239af75078fSIntel * Receive Side Scaling (RSS) configuration.
240af75078fSIntel */
2418a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
242af75078fSIntel
243af75078fSIntel /*
244af75078fSIntel * Port topology configuration
245af75078fSIntel */
246af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247af75078fSIntel
2487741e4cfSIntel /*
2497741e4cfSIntel * Avoid flushing all the RX streams before forwarding starts.
2507741e4cfSIntel */
2517741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2527741e4cfSIntel
253af75078fSIntel /*
2547ee3e944SVasily Philipov * Flow API isolated mode.
2557ee3e944SVasily Philipov */
2567ee3e944SVasily Philipov uint8_t flow_isolate_all;
2577ee3e944SVasily Philipov
2587ee3e944SVasily Philipov /*
259bc202406SDavid Marchand * Avoid checking link status when starting/stopping a port.
260bc202406SDavid Marchand */
261bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
262bc202406SDavid Marchand
263bc202406SDavid Marchand /*
2648ea656f8SGaetan Rivet * Enable link status change notification
2658ea656f8SGaetan Rivet */
2668ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
2678ea656f8SGaetan Rivet
2688ea656f8SGaetan Rivet /*
269284c908cSGaetan Rivet * Enable device removal notification.
270284c908cSGaetan Rivet */
271284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
272284c908cSGaetan Rivet
273284c908cSGaetan Rivet /*
2743af72783SGaetan Rivet * Display or mask ether events.
2753af72783SGaetan Rivet * Default to all events except VF_MBOX
2763af72783SGaetan Rivet */
2773af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
2783af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
2793af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
2803af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
2813af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
2823af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
2833af72783SGaetan Rivet
2843af72783SGaetan Rivet /*
2857b7e5ba7SIntel * NIC bypass mode configuration options.
2867b7e5ba7SIntel */
2877b7e5ba7SIntel
28850c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2897b7e5ba7SIntel /* The NIC bypass watchdog timeout.
*/
290e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
2917b7e5ba7SIntel #endif
2927b7e5ba7SIntel
293e261265eSRadu Nicolau
29462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
29562d3216dSReshma Pattan
29662d3216dSReshma Pattan /*
29762d3216dSReshma Pattan * Set when latency stats are enabled on the command line
29862d3216dSReshma Pattan */
29962d3216dSReshma Pattan uint8_t latencystats_enabled;
30062d3216dSReshma Pattan
30162d3216dSReshma Pattan /*
30262d3216dSReshma Pattan * Lcore ID to serve latency statistics.
30362d3216dSReshma Pattan */
30462d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
30562d3216dSReshma Pattan
30662d3216dSReshma Pattan #endif
30762d3216dSReshma Pattan
3087b7e5ba7SIntel /*
309af75078fSIntel * Ethernet device configuration.
310af75078fSIntel */
311af75078fSIntel struct rte_eth_rxmode rx_mode = {
312af75078fSIntel .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
3130074d02fSShahaf Shuler .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
3140074d02fSShahaf Shuler DEV_RX_OFFLOAD_VLAN_STRIP |
3150074d02fSShahaf Shuler DEV_RX_OFFLOAD_CRC_STRIP),
3160074d02fSShahaf Shuler .ignore_offload_bitfield = 1,
317af75078fSIntel };
318af75078fSIntel
319*fd8c20aaSShahaf Shuler struct rte_eth_txmode tx_mode;
320*fd8c20aaSShahaf Shuler
321af75078fSIntel struct rte_fdir_conf fdir_conf = {
322af75078fSIntel .mode = RTE_FDIR_MODE_NONE,
323af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K,
324af75078fSIntel .status = RTE_FDIR_REPORT_STATUS,
325d9d5e6f2SJingjing Wu .mask = {
326d9d5e6f2SJingjing Wu .vlan_tci_mask = 0x0,
327d9d5e6f2SJingjing Wu .ipv4_mask = {
328d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF,
329d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF,
330d9d5e6f2SJingjing Wu },
331d9d5e6f2SJingjing Wu .ipv6_mask = {
332d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
333d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334d9d5e6f2SJingjing Wu },
335d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF,
336d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF,
33747b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF,
33847b3ac6bSWenzhuo Lu .tunnel_type_mask = 1,
33947b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF,
340d9d5e6f2SJingjing Wu },
341af75078fSIntel .drop_queue = 127,
342af75078fSIntel };
343af75078fSIntel
3442950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1.
*/ 345af75078fSIntel 346ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 347ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 348ed30d9b6SIntel 349ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 350ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 351ed30d9b6SIntel 352ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 353ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 354ed30d9b6SIntel 355a4fd5eeeSElza Mathew /* 356a4fd5eeeSElza Mathew * Display zero values by default for xstats 357a4fd5eeeSElza Mathew */ 358a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 359a4fd5eeeSElza Mathew 360c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 361c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 3627acf894dSStephen Hurd 363e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE 3647e4441c8SRemy Horton /* Bitrate statistics */ 3657e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 366e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 367e25e6c70SRemy Horton uint8_t bitrate_enabled; 368e25e6c70SRemy Horton #endif 3697e4441c8SRemy Horton 370b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 371b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 372b40f8d78SJiayu Hu 373ed30d9b6SIntel /* Forward function declarations */ 37428caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 37528caa76aSZhiyong Yang struct rte_port *port); 376edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 377f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 37876ad4a2dSGaetan Rivet enum rte_eth_event_type type, 379d6af1a13SBernard Iremonger void *param, void *ret_param); 380ce8d5614SIntel 381ce8d5614SIntel /* 382ce8d5614SIntel * Check if all the ports are started. 383ce8d5614SIntel * If yes, return positive value. If not, return zero. 384ce8d5614SIntel */ 385ce8d5614SIntel static int all_ports_started(void); 386ed30d9b6SIntel 38752f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 38852f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 38952f38a20SJiayu Hu 390af75078fSIntel /* 39198a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 392c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 393c9cafcc8SShahaf Shuler */ 394c9cafcc8SShahaf Shuler int 395c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 396c9cafcc8SShahaf Shuler { 397c9cafcc8SShahaf Shuler unsigned int i; 398c9cafcc8SShahaf Shuler 399c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 400c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 401c9cafcc8SShahaf Shuler return 0; 402c9cafcc8SShahaf Shuler } 403c9cafcc8SShahaf Shuler return 1; 404c9cafcc8SShahaf Shuler } 405c9cafcc8SShahaf Shuler 406c9cafcc8SShahaf Shuler /* 407af75078fSIntel * Setup default configuration. 
408af75078fSIntel */ 409af75078fSIntel static void 410af75078fSIntel set_default_fwd_lcores_config(void) 411af75078fSIntel { 412af75078fSIntel unsigned int i; 413af75078fSIntel unsigned int nb_lc; 4147acf894dSStephen Hurd unsigned int sock_num; 415af75078fSIntel 416af75078fSIntel nb_lc = 0; 417af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 418c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 419c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 420c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 421c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 422c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 423c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 424c9cafcc8SShahaf Shuler } 425c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 4267acf894dSStephen Hurd } 427f54fe5eeSStephen Hurd if (!rte_lcore_is_enabled(i)) 428f54fe5eeSStephen Hurd continue; 429f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 430f54fe5eeSStephen Hurd continue; 431f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 432af75078fSIntel } 433af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 434af75078fSIntel nb_cfg_lcores = nb_lcores; 435af75078fSIntel nb_fwd_lcores = 1; 436af75078fSIntel } 437af75078fSIntel 438af75078fSIntel static void 439af75078fSIntel set_def_peer_eth_addrs(void) 440af75078fSIntel { 441af75078fSIntel portid_t i; 442af75078fSIntel 443af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 444af75078fSIntel peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 445af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 446af75078fSIntel } 447af75078fSIntel } 448af75078fSIntel 449af75078fSIntel static void 450af75078fSIntel set_default_fwd_ports_config(void) 451af75078fSIntel { 452af75078fSIntel portid_t pt_id; 45365a7360cSMatan Azrad int i = 0; 454af75078fSIntel 45565a7360cSMatan Azrad RTE_ETH_FOREACH_DEV(pt_id) 45665a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 457af75078fSIntel 458af75078fSIntel nb_cfg_ports = nb_ports; 459af75078fSIntel nb_fwd_ports = nb_ports; 460af75078fSIntel } 461af75078fSIntel 462af75078fSIntel void 463af75078fSIntel set_def_fwd_config(void) 464af75078fSIntel { 465af75078fSIntel set_default_fwd_lcores_config(); 466af75078fSIntel set_def_peer_eth_addrs(); 467af75078fSIntel set_default_fwd_ports_config(); 468af75078fSIntel } 469af75078fSIntel 470af75078fSIntel /* 471af75078fSIntel * Configuration initialisation done once at init time. 
472af75078fSIntel */ 473af75078fSIntel static void 474af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 475af75078fSIntel unsigned int socket_id) 476af75078fSIntel { 477af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 478bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 479af75078fSIntel uint32_t mb_size; 480af75078fSIntel 481dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 482af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 483148f963fSBruce Richardson 484285fd101SOlivier Matz TESTPMD_LOG(INFO, 485d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 486d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 487d1eb542eSOlivier Matz 488b19a0c75SOlivier Matz if (mp_anon != 0) { 489b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 490bece7b6cSChristian Ehrhardt mb_size, (unsigned) mb_mempool_cache, 491148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 492148f963fSBruce Richardson socket_id, 0); 49324427bb9SOlivier Matz if (rte_mp == NULL) 49424427bb9SOlivier Matz goto err; 495b19a0c75SOlivier Matz 496b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 497b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 498b19a0c75SOlivier Matz rte_mp = NULL; 49924427bb9SOlivier Matz goto err; 500b19a0c75SOlivier Matz } 501b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 502b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 503b19a0c75SOlivier Matz } else { 504ea0c20eaSOlivier Matz /* wrapper to rte_mempool_create() */ 505ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 506ea0c20eaSOlivier Matz mb_mempool_cache, 0, mbuf_seg_size, socket_id); 507bece7b6cSChristian Ehrhardt } 508148f963fSBruce Richardson 50924427bb9SOlivier Matz err: 510af75078fSIntel if (rte_mp == NULL) { 511d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 512d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 513d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 514148f963fSBruce Richardson } else if (verbose_level > 0) { 515591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 516af75078fSIntel } 517af75078fSIntel } 518af75078fSIntel 51920a0286fSLiu Xiaofeng /* 52020a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 52120a0286fSLiu Xiaofeng * if valid, return 0, else return -1 52220a0286fSLiu Xiaofeng */ 52320a0286fSLiu Xiaofeng static int 52420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 52520a0286fSLiu Xiaofeng { 52620a0286fSLiu Xiaofeng static int warning_once = 0; 52720a0286fSLiu Xiaofeng 528c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 52920a0286fSLiu Xiaofeng if (!warning_once && numa_support) 53020a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 53120a0286fSLiu Xiaofeng " using --port-numa-config and" 53220a0286fSLiu Xiaofeng " --ring-numa-config parameters along with" 53320a0286fSLiu Xiaofeng " --numa.\n"); 53420a0286fSLiu Xiaofeng warning_once = 1; 53520a0286fSLiu Xiaofeng return -1; 53620a0286fSLiu Xiaofeng } 53720a0286fSLiu Xiaofeng return 0; 53820a0286fSLiu Xiaofeng } 53920a0286fSLiu Xiaofeng 540af75078fSIntel static void 541af75078fSIntel init_config(void) 542af75078fSIntel { 543ce8d5614SIntel portid_t pid; 544af75078fSIntel struct rte_port *port; 545af75078fSIntel struct rte_mempool *mbp; 546af75078fSIntel unsigned int nb_mbuf_per_pool; 547af75078fSIntel lcoreid_t 
lc_id; 5487acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 549b7091f1dSJiayu Hu struct rte_gro_param gro_param; 55052f38a20SJiayu Hu uint32_t gso_types; 551af75078fSIntel 5527acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 553487f9a59SYulong Pei 554487f9a59SYulong Pei if (numa_support) { 555487f9a59SYulong Pei memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 556487f9a59SYulong Pei memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 557487f9a59SYulong Pei memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 558487f9a59SYulong Pei } 559487f9a59SYulong Pei 560af75078fSIntel /* Configuration of logical cores. */ 561af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 562af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 563fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 564af75078fSIntel if (fwd_lcores == NULL) { 565ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 566ce8d5614SIntel "failed\n", nb_lcores); 567af75078fSIntel } 568af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 569af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 570af75078fSIntel sizeof(struct fwd_lcore), 571fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 572af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 573ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 574ce8d5614SIntel "failed\n"); 575af75078fSIntel } 576af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 577af75078fSIntel } 578af75078fSIntel 5797d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 580ce8d5614SIntel port = &ports[pid]; 581*fd8c20aaSShahaf Shuler /* Apply default Tx configuration for all ports */ 582*fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 583ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 584ce8d5614SIntel 585b6ea6408SIntel if (numa_support) { 586b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 587b6ea6408SIntel port_per_socket[port_numa[pid]]++; 588b6ea6408SIntel else { 589b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 59020a0286fSLiu Xiaofeng 59120a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 59220a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 59320a0286fSLiu Xiaofeng socket_id = 0; 594b6ea6408SIntel port_per_socket[socket_id]++; 595b6ea6408SIntel } 596b6ea6408SIntel } 597b6ea6408SIntel 598ce8d5614SIntel /* set flag to initialize port/queue */ 599ce8d5614SIntel port->need_reconfig = 1; 600ce8d5614SIntel port->need_reconfig_queues = 1; 601ce8d5614SIntel } 602ce8d5614SIntel 6033ab64341SOlivier Matz /* 6043ab64341SOlivier Matz * Create pools of mbuf. 6053ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 6063ab64341SOlivier Matz * socket 0 memory by default. 6073ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 6083ab64341SOlivier Matz * 6093ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 6103ab64341SOlivier Matz * nb_txd can be configured at run time. 
6113ab64341SOlivier Matz */ 6123ab64341SOlivier Matz if (param_total_num_mbufs) 6133ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 6143ab64341SOlivier Matz else { 6153ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 6163ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 6173ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 6183ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 6193ab64341SOlivier Matz } 6203ab64341SOlivier Matz 621b6ea6408SIntel if (numa_support) { 622b6ea6408SIntel uint8_t i; 623ce8d5614SIntel 624c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 625c9cafcc8SShahaf Shuler mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 626c9cafcc8SShahaf Shuler socket_ids[i]); 6273ab64341SOlivier Matz } else { 6283ab64341SOlivier Matz if (socket_num == UMA_NO_CONFIG) 6293ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 6303ab64341SOlivier Matz else 6313ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 6323ab64341SOlivier Matz socket_num); 6333ab64341SOlivier Matz } 634b6ea6408SIntel 635b6ea6408SIntel init_port_config(); 6365886ae07SAdrien Mazarguil 63752f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 63852f38a20SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO; 6395886ae07SAdrien Mazarguil /* 6405886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 6415886ae07SAdrien Mazarguil */ 6425886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 6438fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 6448fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 6458fd8bebcSAdrien Mazarguil 6465886ae07SAdrien Mazarguil if (mbp == NULL) 6475886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 6485886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 64952f38a20SJiayu Hu /* initialize GSO context */ 65052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 65152f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 65252f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 65352f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 65452f38a20SJiayu Hu ETHER_CRC_LEN; 65552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 6565886ae07SAdrien Mazarguil } 6575886ae07SAdrien Mazarguil 658ce8d5614SIntel /* Configuration of packet forwarding streams. 
*/ 659ce8d5614SIntel if (init_fwd_streams() < 0) 660ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 6610c0db76fSBernard Iremonger 6620c0db76fSBernard Iremonger fwd_config_setup(); 663b7091f1dSJiayu Hu 664b7091f1dSJiayu Hu /* create a gro context for each lcore */ 665b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 666b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 667b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 668b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 669b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 670b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 671b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 672b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 673b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 674b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 675b7091f1dSJiayu Hu } 676b7091f1dSJiayu Hu } 677ce8d5614SIntel } 678ce8d5614SIntel 6792950a769SDeclan Doherty 6802950a769SDeclan Doherty void 681a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 6822950a769SDeclan Doherty { 6832950a769SDeclan Doherty struct rte_port *port; 6842950a769SDeclan Doherty 6852950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 6862950a769SDeclan Doherty port = &ports[new_port_id]; 6872950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 6882950a769SDeclan Doherty 6892950a769SDeclan Doherty /* set flag to initialize port/queue */ 6902950a769SDeclan Doherty port->need_reconfig = 1; 6912950a769SDeclan Doherty port->need_reconfig_queues = 1; 692a21d5a4bSDeclan Doherty port->socket_id = socket_id; 6932950a769SDeclan Doherty 6942950a769SDeclan Doherty init_port_config(); 6952950a769SDeclan Doherty } 6962950a769SDeclan Doherty 6972950a769SDeclan Doherty 698ce8d5614SIntel int 699ce8d5614SIntel init_fwd_streams(void) 700ce8d5614SIntel { 701ce8d5614SIntel portid_t pid; 702ce8d5614SIntel struct rte_port *port; 703ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 7045a8fb55cSReshma Pattan queueid_t q; 705ce8d5614SIntel 706ce8d5614SIntel /* set socket id according to numa or not */ 7077d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 708ce8d5614SIntel port = &ports[pid]; 709ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 710ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 711ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 712ce8d5614SIntel port->dev_info.max_rx_queues); 713ce8d5614SIntel return -1; 714ce8d5614SIntel } 715ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 716ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 717ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 718ce8d5614SIntel port->dev_info.max_tx_queues); 719ce8d5614SIntel return -1; 720ce8d5614SIntel } 72120a0286fSLiu Xiaofeng if (numa_support) { 72220a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 72320a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 72420a0286fSLiu Xiaofeng else { 725b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 72620a0286fSLiu Xiaofeng 72720a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 72820a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 72920a0286fSLiu Xiaofeng port->socket_id = 0; 73020a0286fSLiu Xiaofeng } 73120a0286fSLiu Xiaofeng } 732b6ea6408SIntel else { 733b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 734af75078fSIntel port->socket_id = 0; 735b6ea6408SIntel else 736b6ea6408SIntel port->socket_id = socket_num; 737b6ea6408SIntel } 
738af75078fSIntel } 739af75078fSIntel 7405a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 7415a8fb55cSReshma Pattan if (q == 0) { 7425a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 7435a8fb55cSReshma Pattan return -1; 7445a8fb55cSReshma Pattan } 7455a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 746ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 747ce8d5614SIntel return 0; 748ce8d5614SIntel /* clear the old */ 749ce8d5614SIntel if (fwd_streams != NULL) { 750ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 751ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 752ce8d5614SIntel continue; 753ce8d5614SIntel rte_free(fwd_streams[sm_id]); 754ce8d5614SIntel fwd_streams[sm_id] = NULL; 755af75078fSIntel } 756ce8d5614SIntel rte_free(fwd_streams); 757ce8d5614SIntel fwd_streams = NULL; 758ce8d5614SIntel } 759ce8d5614SIntel 760ce8d5614SIntel /* init new */ 761ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 762ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 763fdf20fa7SSergio Gonzalez Monroy sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); 764ce8d5614SIntel if (fwd_streams == NULL) 765ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " 766ce8d5614SIntel "failed\n", nb_fwd_streams); 767ce8d5614SIntel 768af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 769af75078fSIntel fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", 770fdf20fa7SSergio Gonzalez Monroy sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); 771ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 772ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" 773ce8d5614SIntel " failed\n"); 774af75078fSIntel } 775ce8d5614SIntel 776ce8d5614SIntel return 0; 777af75078fSIntel } 778af75078fSIntel 779af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 780af75078fSIntel static void 781af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 782af75078fSIntel { 783af75078fSIntel unsigned int total_burst; 784af75078fSIntel unsigned int nb_burst; 785af75078fSIntel unsigned int burst_stats[3]; 786af75078fSIntel uint16_t pktnb_stats[3]; 787af75078fSIntel uint16_t nb_pkt; 788af75078fSIntel int burst_percent[3]; 789af75078fSIntel 790af75078fSIntel /* 791af75078fSIntel * First compute the total number of packet bursts and the 792af75078fSIntel * two highest numbers of bursts of the same number of packets. 
793af75078fSIntel */ 794af75078fSIntel total_burst = 0; 795af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 796af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 797af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 798af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 799af75078fSIntel if (nb_burst == 0) 800af75078fSIntel continue; 801af75078fSIntel total_burst += nb_burst; 802af75078fSIntel if (nb_burst > burst_stats[0]) { 803af75078fSIntel burst_stats[1] = burst_stats[0]; 804af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 805af75078fSIntel burst_stats[0] = nb_burst; 806af75078fSIntel pktnb_stats[0] = nb_pkt; 807af75078fSIntel } 808af75078fSIntel } 809af75078fSIntel if (total_burst == 0) 810af75078fSIntel return; 811af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 812af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 813af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 814af75078fSIntel if (burst_stats[0] == total_burst) { 815af75078fSIntel printf("]\n"); 816af75078fSIntel return; 817af75078fSIntel } 818af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 819af75078fSIntel printf(" + %d%% of %d pkts]\n", 820af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 821af75078fSIntel return; 822af75078fSIntel } 823af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 824af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 825af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 826af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 827af75078fSIntel return; 828af75078fSIntel } 829af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 830af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 831af75078fSIntel } 832af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 833af75078fSIntel 834af75078fSIntel static void 835af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 836af75078fSIntel { 837af75078fSIntel struct rte_port *port; 838013af9b6SIntel uint8_t i; 839af75078fSIntel 840af75078fSIntel static const char *fwd_stats_border = "----------------------"; 841af75078fSIntel 842af75078fSIntel port = &ports[port_id]; 843af75078fSIntel printf("\n %s Forward statistics for port %-2d %s\n", 844af75078fSIntel fwd_stats_border, port_id, fwd_stats_border); 845013af9b6SIntel 846013af9b6SIntel if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 847af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 848af75078fSIntel "%-"PRIu64"\n", 84970bdb186SIvan Boule stats->ipackets, stats->imissed, 85070bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 851af75078fSIntel 852af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) 853af75078fSIntel printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 854af75078fSIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 85586057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 856f72a0fa6SStephen Hemminger printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 85770bdb186SIvan Boule printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 85870bdb186SIvan Boule } 859af75078fSIntel 860af75078fSIntel printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 861af75078fSIntel "%-"PRIu64"\n", 862af75078fSIntel stats->opackets, port->tx_dropped, 863af75078fSIntel (uint64_t) (stats->opackets + 
port->tx_dropped)); 864013af9b6SIntel } 865013af9b6SIntel else { 866013af9b6SIntel printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 867013af9b6SIntel "%14"PRIu64"\n", 86870bdb186SIvan Boule stats->ipackets, stats->imissed, 86970bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 870013af9b6SIntel 871013af9b6SIntel if (cur_fwd_eng == &csum_fwd_engine) 872013af9b6SIntel printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 873013af9b6SIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 87486057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 875f72a0fa6SStephen Hemminger printf(" RX-error:%"PRIu64"\n", stats->ierrors); 87670bdb186SIvan Boule printf(" RX-nombufs: %14"PRIu64"\n", 87770bdb186SIvan Boule stats->rx_nombuf); 87870bdb186SIvan Boule } 879013af9b6SIntel 880013af9b6SIntel printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 881013af9b6SIntel "%14"PRIu64"\n", 882013af9b6SIntel stats->opackets, port->tx_dropped, 883013af9b6SIntel (uint64_t) (stats->opackets + port->tx_dropped)); 884013af9b6SIntel } 885e659b6b4SIvan Boule 886af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 887af75078fSIntel if (port->rx_stream) 888013af9b6SIntel pkt_burst_stats_display("RX", 889013af9b6SIntel &port->rx_stream->rx_burst_stats); 890af75078fSIntel if (port->tx_stream) 891013af9b6SIntel pkt_burst_stats_display("TX", 892013af9b6SIntel &port->tx_stream->tx_burst_stats); 893af75078fSIntel #endif 894af75078fSIntel 895013af9b6SIntel if (port->rx_queue_stats_mapping_enabled) { 896013af9b6SIntel printf("\n"); 897013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 898013af9b6SIntel printf(" Stats reg %2d RX-packets:%14"PRIu64 899013af9b6SIntel " RX-errors:%14"PRIu64 900013af9b6SIntel " RX-bytes:%14"PRIu64"\n", 901013af9b6SIntel i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 902013af9b6SIntel } 903013af9b6SIntel printf("\n"); 904013af9b6SIntel } 905013af9b6SIntel if (port->tx_queue_stats_mapping_enabled) { 906013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 907013af9b6SIntel printf(" Stats reg %2d TX-packets:%14"PRIu64 908013af9b6SIntel " TX-bytes:%14"PRIu64"\n", 909013af9b6SIntel i, stats->q_opackets[i], stats->q_obytes[i]); 910013af9b6SIntel } 911013af9b6SIntel } 912013af9b6SIntel 913af75078fSIntel printf(" %s--------------------------------%s\n", 914af75078fSIntel fwd_stats_border, fwd_stats_border); 915af75078fSIntel } 916af75078fSIntel 917af75078fSIntel static void 918af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 919af75078fSIntel { 920af75078fSIntel struct fwd_stream *fs; 921af75078fSIntel static const char *fwd_top_stats_border = "-------"; 922af75078fSIntel 923af75078fSIntel fs = fwd_streams[stream_id]; 924af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 925af75078fSIntel (fs->fwd_dropped == 0)) 926af75078fSIntel return; 927af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 928af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 929af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 930af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 931af75078fSIntel printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 932af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 933af75078fSIntel 934af75078fSIntel /* if checksum mode */ 935af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 936013af9b6SIntel printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 937013af9b6SIntel "%-14u\n", 
fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
938af75078fSIntel }
939af75078fSIntel
940af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
941af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats);
942af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats);
943af75078fSIntel #endif
944af75078fSIntel }
945af75078fSIntel
946af75078fSIntel static void
9477741e4cfSIntel flush_fwd_rx_queues(void)
948af75078fSIntel {
949af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
950af75078fSIntel portid_t rxp;
9517741e4cfSIntel portid_t port_id;
952af75078fSIntel queueid_t rxq;
953af75078fSIntel uint16_t nb_rx;
954af75078fSIntel uint16_t i;
955af75078fSIntel uint8_t j;
956f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
957594302c7SJames Poole uint64_t timer_period;
958f487715fSReshma Pattan
959f487715fSReshma Pattan /* convert to number of cycles */
960594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */
961af75078fSIntel
962af75078fSIntel for (j = 0; j < 2; j++) {
9637741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
964af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
9657741e4cfSIntel port_id = fwd_ports_ids[rxp];
966f487715fSReshma Pattan /**
967f487715fSReshma Pattan * testpmd can get stuck in the do-while loop below
968f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero
969f487715fSReshma Pattan * packets, so a timer is added to exit this loop
970f487715fSReshma Pattan * after a 1-second timer expiry.
971f487715fSReshma Pattan */
972f487715fSReshma Pattan prev_tsc = rte_rdtsc();
973af75078fSIntel do {
9747741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq,
975013af9b6SIntel pkts_burst, MAX_PKT_BURST);
976af75078fSIntel for (i = 0; i < nb_rx; i++)
977af75078fSIntel rte_pktmbuf_free(pkts_burst[i]);
978f487715fSReshma Pattan
979f487715fSReshma Pattan cur_tsc = rte_rdtsc();
980f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc;
981f487715fSReshma Pattan timer_tsc += diff_tsc;
982f487715fSReshma Pattan } while ((nb_rx > 0) &&
983f487715fSReshma Pattan (timer_tsc < timer_period));
984f487715fSReshma Pattan timer_tsc = 0;
985af75078fSIntel }
986af75078fSIntel }
987af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
988af75078fSIntel }
989af75078fSIntel }
990af75078fSIntel
991af75078fSIntel static void
992af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
993af75078fSIntel {
994af75078fSIntel struct fwd_stream **fsm;
995af75078fSIntel streamid_t nb_fs;
996af75078fSIntel streamid_t sm_id;
9977e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
9987e4441c8SRemy Horton uint64_t tics_per_1sec;
9997e4441c8SRemy Horton uint64_t tics_datum;
10007e4441c8SRemy Horton uint64_t tics_current;
10017e4441c8SRemy Horton uint8_t idx_port, cnt_ports;
1002af75078fSIntel
10037e4441c8SRemy Horton cnt_ports = rte_eth_dev_count();
10047e4441c8SRemy Horton tics_datum = rte_rdtsc();
10057e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz();
10067e4441c8SRemy Horton #endif
1007af75078fSIntel fsm = &fwd_streams[fc->stream_idx];
1008af75078fSIntel nb_fs = fc->stream_nb;
1009af75078fSIntel do {
1010af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++)
1011af75078fSIntel (*pkt_fwd)(fsm[sm_id]);
10127e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1013e25e6c70SRemy Horton if (bitrate_enabled != 0 &&
1014e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) {
10157e4441c8SRemy Horton tics_current = rte_rdtsc();
10167e4441c8SRemy Horton if (tics_current - tics_datum >=
tics_per_1sec) { 10177e4441c8SRemy Horton /* Periodic bitrate calculation */ 1018e25e6c70SRemy Horton for (idx_port = 0; 1019e25e6c70SRemy Horton idx_port < cnt_ports; 1020e25e6c70SRemy Horton idx_port++) 1021e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 1022e25e6c70SRemy Horton idx_port); 10237e4441c8SRemy Horton tics_datum = tics_current; 10247e4441c8SRemy Horton } 1025e25e6c70SRemy Horton } 10267e4441c8SRemy Horton #endif 102762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 102865eb1e54SPablo de Lara if (latencystats_enabled != 0 && 102965eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 103062d3216dSReshma Pattan rte_latencystats_update(); 103162d3216dSReshma Pattan #endif 103262d3216dSReshma Pattan 1033af75078fSIntel } while (! fc->stopped); 1034af75078fSIntel } 1035af75078fSIntel 1036af75078fSIntel static int 1037af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 1038af75078fSIntel { 1039af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1040af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 1041af75078fSIntel return 0; 1042af75078fSIntel } 1043af75078fSIntel 1044af75078fSIntel /* 1045af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 1046af75078fSIntel * Used to start communication flows in network loopback test configurations. 1047af75078fSIntel */ 1048af75078fSIntel static int 1049af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 1050af75078fSIntel { 1051af75078fSIntel struct fwd_lcore *fwd_lc; 1052af75078fSIntel struct fwd_lcore tmp_lcore; 1053af75078fSIntel 1054af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 1055af75078fSIntel tmp_lcore = *fwd_lc; 1056af75078fSIntel tmp_lcore.stopped = 1; 1057af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1058af75078fSIntel return 0; 1059af75078fSIntel } 1060af75078fSIntel 1061af75078fSIntel /* 1062af75078fSIntel * Launch packet forwarding: 1063af75078fSIntel * - Setup per-port forwarding context. 1064af75078fSIntel * - launch logical cores with their forwarding configuration. 1065af75078fSIntel */ 1066af75078fSIntel static void 1067af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1068af75078fSIntel { 1069af75078fSIntel port_fwd_begin_t port_fwd_begin; 1070af75078fSIntel unsigned int i; 1071af75078fSIntel unsigned int lc_id; 1072af75078fSIntel int diag; 1073af75078fSIntel 1074af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1075af75078fSIntel if (port_fwd_begin != NULL) { 1076af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1077af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1078af75078fSIntel } 1079af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1080af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1081af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1082af75078fSIntel fwd_lcores[i]->stopped = 0; 1083af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1084af75078fSIntel fwd_lcores[i], lc_id); 1085af75078fSIntel if (diag != 0) 1086af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1087af75078fSIntel lc_id, diag); 1088af75078fSIntel } 1089af75078fSIntel } 1090af75078fSIntel } 1091af75078fSIntel 1092af75078fSIntel /* 1093af75078fSIntel * Launch packet forwarding configuration. 
1094af75078fSIntel */
1095af75078fSIntel void
1096af75078fSIntel start_packet_forwarding(int with_tx_first)
1097af75078fSIntel {
1098af75078fSIntel port_fwd_begin_t port_fwd_begin;
1099af75078fSIntel port_fwd_end_t port_fwd_end;
1100af75078fSIntel struct rte_port *port;
1101af75078fSIntel unsigned int i;
1102af75078fSIntel portid_t pt_id;
1103af75078fSIntel streamid_t sm_id;
1104af75078fSIntel
11055a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
11065a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
11075a8fb55cSReshma Pattan
11085a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
11095a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
11105a8fb55cSReshma Pattan
11115a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
11125a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
11135a8fb55cSReshma Pattan (!nb_rxq || !nb_txq))
11145a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE,
11155a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n",
11165a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name);
11175a8fb55cSReshma Pattan
1118ce8d5614SIntel if (all_ports_started() == 0) {
1119ce8d5614SIntel printf("Not all ports were started\n");
1120ce8d5614SIntel return;
1121ce8d5614SIntel }
1122af75078fSIntel if (test_done == 0) {
1123af75078fSIntel printf("Packet forwarding already started\n");
1124af75078fSIntel return;
1125af75078fSIntel }
1126edf87b4aSBernard Iremonger
1127edf87b4aSBernard Iremonger if (init_fwd_streams() < 0) {
1128edf87b4aSBernard Iremonger printf("Fail from init_fwd_streams()\n");
1129edf87b4aSBernard Iremonger return;
1130edf87b4aSBernard Iremonger }
1131edf87b4aSBernard Iremonger
11327741e4cfSIntel if(dcb_test) {
11337741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) {
11347741e4cfSIntel pt_id = fwd_ports_ids[i];
11357741e4cfSIntel port = &ports[pt_id];
11367741e4cfSIntel if (!port->dcb_flag) {
11377741e4cfSIntel printf("In DCB mode, all forwarding ports must "
11387741e4cfSIntel "be configured in this mode.\n");
1139013af9b6SIntel return;
1140013af9b6SIntel }
11417741e4cfSIntel }
11427741e4cfSIntel if (nb_fwd_lcores == 1) {
11437741e4cfSIntel printf("In DCB mode, the number of forwarding cores "
11447741e4cfSIntel "should be larger than 1.\n");
11457741e4cfSIntel return;
11467741e4cfSIntel }
11477741e4cfSIntel }
1148af75078fSIntel test_done = 0;
11497741e4cfSIntel
11507741e4cfSIntel if(!no_flush_rx)
11517741e4cfSIntel flush_fwd_rx_queues();
11527741e4cfSIntel
1153af75078fSIntel fwd_config_setup();
1154933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config);
1155af75078fSIntel rxtx_config_display();
1156af75078fSIntel
1157af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1158af75078fSIntel pt_id = fwd_ports_ids[i];
1159af75078fSIntel port = &ports[pt_id];
1160af75078fSIntel rte_eth_stats_get(pt_id, &port->stats);
1161af75078fSIntel port->tx_dropped = 0;
1162013af9b6SIntel
1163013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port);
1164af75078fSIntel }
1165af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1166af75078fSIntel fwd_streams[sm_id]->rx_packets = 0;
1167af75078fSIntel fwd_streams[sm_id]->tx_packets = 0;
1168af75078fSIntel fwd_streams[sm_id]->fwd_dropped = 0;
1169af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1170af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1171af75078fSIntel
1172af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1173af75078fSIntel memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1174af75078fSIntel sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1175af75078fSIntel memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1176af75078fSIntel sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1177af75078fSIntel #endif 1178af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1179af75078fSIntel fwd_streams[sm_id]->core_cycles = 0; 1180af75078fSIntel #endif 1181af75078fSIntel } 1182af75078fSIntel if (with_tx_first) { 1183af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1184af75078fSIntel if (port_fwd_begin != NULL) { 1185af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1186af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1187af75078fSIntel } 1188acbf77a6SZhihong Wang while (with_tx_first--) { 1189acbf77a6SZhihong Wang launch_packet_forwarding( 1190acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 1191af75078fSIntel rte_eal_mp_wait_lcore(); 1192acbf77a6SZhihong Wang } 1193af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1194af75078fSIntel if (port_fwd_end != NULL) { 1195af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1196af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1197af75078fSIntel } 1198af75078fSIntel } 1199af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1200af75078fSIntel } 1201af75078fSIntel 1202af75078fSIntel void 1203af75078fSIntel stop_packet_forwarding(void) 1204af75078fSIntel { 1205af75078fSIntel struct rte_eth_stats stats; 1206af75078fSIntel struct rte_port *port; 1207af75078fSIntel port_fwd_end_t port_fwd_end; 1208af75078fSIntel int i; 1209af75078fSIntel portid_t pt_id; 1210af75078fSIntel streamid_t sm_id; 1211af75078fSIntel lcoreid_t lc_id; 1212af75078fSIntel uint64_t total_recv; 1213af75078fSIntel uint64_t total_xmit; 1214af75078fSIntel uint64_t total_rx_dropped; 1215af75078fSIntel uint64_t total_tx_dropped; 1216af75078fSIntel uint64_t total_rx_nombuf; 1217af75078fSIntel uint64_t tx_dropped; 1218af75078fSIntel uint64_t rx_bad_ip_csum; 1219af75078fSIntel uint64_t rx_bad_l4_csum; 1220af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1221af75078fSIntel uint64_t fwd_cycles; 1222af75078fSIntel #endif 1223b7091f1dSJiayu Hu 1224af75078fSIntel static const char *acc_stats_border = "+++++++++++++++"; 1225af75078fSIntel 1226af75078fSIntel if (test_done) { 1227af75078fSIntel printf("Packet forwarding not started\n"); 1228af75078fSIntel return; 1229af75078fSIntel } 1230af75078fSIntel printf("Telling cores to stop..."); 1231af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1232af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1233af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1234af75078fSIntel rte_eal_mp_wait_lcore(); 1235af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1236af75078fSIntel if (port_fwd_end != NULL) { 1237af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1238af75078fSIntel pt_id = fwd_ports_ids[i]; 1239af75078fSIntel (*port_fwd_end)(pt_id); 1240af75078fSIntel } 1241af75078fSIntel } 1242af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1243af75078fSIntel fwd_cycles = 0; 1244af75078fSIntel #endif 1245af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1246af75078fSIntel if (cur_fwd_config.nb_fwd_streams > 1247af75078fSIntel cur_fwd_config.nb_fwd_ports) { 1248af75078fSIntel fwd_stream_stats_display(sm_id); 1249af75078fSIntel 
ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1250af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1251af75078fSIntel } else { 1252af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = 1253af75078fSIntel fwd_streams[sm_id]; 1254af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = 1255af75078fSIntel fwd_streams[sm_id]; 1256af75078fSIntel } 1257af75078fSIntel tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1258af75078fSIntel tx_dropped = (uint64_t) (tx_dropped + 1259af75078fSIntel fwd_streams[sm_id]->fwd_dropped); 1260af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1261af75078fSIntel 1262013af9b6SIntel rx_bad_ip_csum = 1263013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1264af75078fSIntel rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1265af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum); 1266013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1267013af9b6SIntel rx_bad_ip_csum; 1268af75078fSIntel 1269013af9b6SIntel rx_bad_l4_csum = 1270013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1271af75078fSIntel rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1272af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum); 1273013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1274013af9b6SIntel rx_bad_l4_csum; 1275af75078fSIntel 1276af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1277af75078fSIntel fwd_cycles = (uint64_t) (fwd_cycles + 1278af75078fSIntel fwd_streams[sm_id]->core_cycles); 1279af75078fSIntel #endif 1280af75078fSIntel } 1281af75078fSIntel total_recv = 0; 1282af75078fSIntel total_xmit = 0; 1283af75078fSIntel total_rx_dropped = 0; 1284af75078fSIntel total_tx_dropped = 0; 1285af75078fSIntel total_rx_nombuf = 0; 12867741e4cfSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1287af75078fSIntel pt_id = fwd_ports_ids[i]; 1288af75078fSIntel 1289af75078fSIntel port = &ports[pt_id]; 1290af75078fSIntel rte_eth_stats_get(pt_id, &stats); 1291af75078fSIntel stats.ipackets -= port->stats.ipackets; 1292af75078fSIntel port->stats.ipackets = 0; 1293af75078fSIntel stats.opackets -= port->stats.opackets; 1294af75078fSIntel port->stats.opackets = 0; 1295af75078fSIntel stats.ibytes -= port->stats.ibytes; 1296af75078fSIntel port->stats.ibytes = 0; 1297af75078fSIntel stats.obytes -= port->stats.obytes; 1298af75078fSIntel port->stats.obytes = 0; 129970bdb186SIvan Boule stats.imissed -= port->stats.imissed; 130070bdb186SIvan Boule port->stats.imissed = 0; 1301af75078fSIntel stats.oerrors -= port->stats.oerrors; 1302af75078fSIntel port->stats.oerrors = 0; 1303af75078fSIntel stats.rx_nombuf -= port->stats.rx_nombuf; 1304af75078fSIntel port->stats.rx_nombuf = 0; 1305af75078fSIntel 1306af75078fSIntel total_recv += stats.ipackets; 1307af75078fSIntel total_xmit += stats.opackets; 130870bdb186SIvan Boule total_rx_dropped += stats.imissed; 1309af75078fSIntel total_tx_dropped += port->tx_dropped; 1310af75078fSIntel total_rx_nombuf += stats.rx_nombuf; 1311af75078fSIntel 1312af75078fSIntel fwd_port_stats_display(pt_id, &stats); 1313af75078fSIntel } 1314b7091f1dSJiayu Hu 1315af75078fSIntel printf("\n %s Accumulated forward statistics for all ports" 1316af75078fSIntel "%s\n", 1317af75078fSIntel acc_stats_border, acc_stats_border); 1318af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1319af75078fSIntel "%-"PRIu64"\n" 1320af75078fSIntel " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1321af75078fSIntel "%-"PRIu64"\n", 
1322af75078fSIntel total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1323af75078fSIntel total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1324af75078fSIntel if (total_rx_nombuf > 0) 1325af75078fSIntel printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1326af75078fSIntel printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1327af75078fSIntel "%s\n", 1328af75078fSIntel acc_stats_border, acc_stats_border); 1329af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1330af75078fSIntel if (total_recv > 0) 1331af75078fSIntel printf("\n CPU cycles/packet=%u (total cycles=" 1332af75078fSIntel "%"PRIu64" / total RX packets=%"PRIu64")\n", 1333af75078fSIntel (unsigned int)(fwd_cycles / total_recv), 1334af75078fSIntel fwd_cycles, total_recv); 1335af75078fSIntel #endif 1336af75078fSIntel printf("\nDone.\n"); 1337af75078fSIntel test_done = 1; 1338af75078fSIntel } 1339af75078fSIntel 1340cfae07fdSOuyang Changchun void 1341cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1342cfae07fdSOuyang Changchun { 1343492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1344cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1345cfae07fdSOuyang Changchun } 1346cfae07fdSOuyang Changchun 1347cfae07fdSOuyang Changchun void 1348cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1349cfae07fdSOuyang Changchun { 1350492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1351cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1352cfae07fdSOuyang Changchun } 1353cfae07fdSOuyang Changchun 1354ce8d5614SIntel static int 1355ce8d5614SIntel all_ports_started(void) 1356ce8d5614SIntel { 1357ce8d5614SIntel portid_t pi; 1358ce8d5614SIntel struct rte_port *port; 1359ce8d5614SIntel 13607d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1361ce8d5614SIntel port = &ports[pi]; 1362ce8d5614SIntel /* Check if there is a port which is not started */ 136341b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 136441b05095SBernard Iremonger (port->slave_flag == 0)) 1365ce8d5614SIntel return 0; 1366ce8d5614SIntel } 1367ce8d5614SIntel 1368ce8d5614SIntel /* No port is not started */ 1369ce8d5614SIntel return 1; 1370ce8d5614SIntel } 1371ce8d5614SIntel 1372148f963fSBruce Richardson int 13736018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 13746018eb8cSShahaf Shuler { 13756018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 13766018eb8cSShahaf Shuler 13776018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 13786018eb8cSShahaf Shuler (port->slave_flag == 0)) 13796018eb8cSShahaf Shuler return 0; 13806018eb8cSShahaf Shuler return 1; 13816018eb8cSShahaf Shuler } 13826018eb8cSShahaf Shuler 13836018eb8cSShahaf Shuler int 1384edab33b1STetsuya Mukawa all_ports_stopped(void) 1385edab33b1STetsuya Mukawa { 1386edab33b1STetsuya Mukawa portid_t pi; 1387edab33b1STetsuya Mukawa 13887d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 13896018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 1390edab33b1STetsuya Mukawa return 0; 1391edab33b1STetsuya Mukawa } 1392edab33b1STetsuya Mukawa 1393edab33b1STetsuya Mukawa return 1; 1394edab33b1STetsuya Mukawa } 1395edab33b1STetsuya Mukawa 1396edab33b1STetsuya Mukawa int 1397edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 1398edab33b1STetsuya Mukawa { 1399edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1400edab33b1STetsuya Mukawa return 0; 1401edab33b1STetsuya Mukawa 1402edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 1403edab33b1STetsuya Mukawa return 
0; 1404edab33b1STetsuya Mukawa 1405edab33b1STetsuya Mukawa return 1; 1406edab33b1STetsuya Mukawa } 1407edab33b1STetsuya Mukawa 1408edab33b1STetsuya Mukawa static int 1409edab33b1STetsuya Mukawa port_is_closed(portid_t port_id) 1410edab33b1STetsuya Mukawa { 1411edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1412edab33b1STetsuya Mukawa return 0; 1413edab33b1STetsuya Mukawa 1414edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1415edab33b1STetsuya Mukawa return 0; 1416edab33b1STetsuya Mukawa 1417edab33b1STetsuya Mukawa return 1; 1418edab33b1STetsuya Mukawa } 1419edab33b1STetsuya Mukawa 1420edab33b1STetsuya Mukawa int 1421ce8d5614SIntel start_port(portid_t pid) 1422ce8d5614SIntel { 142392d2703eSMichael Qiu int diag, need_check_link_status = -1; 1424ce8d5614SIntel portid_t pi; 1425ce8d5614SIntel queueid_t qi; 1426ce8d5614SIntel struct rte_port *port; 14272950a769SDeclan Doherty struct ether_addr mac_addr; 142876ad4a2dSGaetan Rivet enum rte_eth_event_type event_type; 1429ce8d5614SIntel 14304468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14314468635fSMichael Qiu return 0; 14324468635fSMichael Qiu 1433ce8d5614SIntel if(dcb_config) 1434ce8d5614SIntel dcb_test = 1; 14357d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1436edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1437ce8d5614SIntel continue; 1438ce8d5614SIntel 143992d2703eSMichael Qiu need_check_link_status = 0; 1440ce8d5614SIntel port = &ports[pi]; 1441ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1442ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1443ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1444ce8d5614SIntel continue; 1445ce8d5614SIntel } 1446ce8d5614SIntel 1447ce8d5614SIntel if (port->need_reconfig > 0) { 1448ce8d5614SIntel port->need_reconfig = 0; 1449ce8d5614SIntel 14507ee3e944SVasily Philipov if (flow_isolate_all) { 14517ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 14527ee3e944SVasily Philipov if (ret) { 14537ee3e944SVasily Philipov printf("Failed to apply isolated" 14547ee3e944SVasily Philipov " mode on port %d\n", pi); 14557ee3e944SVasily Philipov return -1; 14567ee3e944SVasily Philipov } 14577ee3e944SVasily Philipov } 14587ee3e944SVasily Philipov 14595706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 146020a0286fSLiu Xiaofeng port->socket_id); 1461ce8d5614SIntel /* configure port */ 1462ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1463ce8d5614SIntel &(port->dev_conf)); 1464ce8d5614SIntel if (diag != 0) { 1465ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1466ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1467ce8d5614SIntel printf("Port %d can not be set back " 1468ce8d5614SIntel "to stopped\n", pi); 1469ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1470ce8d5614SIntel /* try to reconfigure port next time */ 1471ce8d5614SIntel port->need_reconfig = 1; 1472148f963fSBruce Richardson return -1; 1473ce8d5614SIntel } 1474ce8d5614SIntel } 1475ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1476ce8d5614SIntel port->need_reconfig_queues = 0; 1477597f9fafSShahaf Shuler port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; 1478597f9fafSShahaf Shuler /* Apply Tx offloads configuration */ 1479597f9fafSShahaf Shuler port->tx_conf.offloads = port->dev_conf.txmode.offloads; 1480ce8d5614SIntel /* setup tx queues */ 1481ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1482b6ea6408SIntel if ((numa_support) && 1483b6ea6408SIntel 
(txring_numa[pi] != NUMA_NO_CONFIG)) 1484b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1485b6ea6408SIntel nb_txd,txring_numa[pi], 1486b6ea6408SIntel &(port->tx_conf)); 1487b6ea6408SIntel else 1488b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1489b6ea6408SIntel nb_txd,port->socket_id, 1490b6ea6408SIntel &(port->tx_conf)); 1491b6ea6408SIntel 1492ce8d5614SIntel if (diag == 0) 1493ce8d5614SIntel continue; 1494ce8d5614SIntel 1495ce8d5614SIntel /* Fail to setup tx queue, return */ 1496ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1497ce8d5614SIntel RTE_PORT_HANDLING, 1498ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1499ce8d5614SIntel printf("Port %d can not be set back " 1500ce8d5614SIntel "to stopped\n", pi); 1501ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1502ce8d5614SIntel /* try to reconfigure queues next time */ 1503ce8d5614SIntel port->need_reconfig_queues = 1; 1504148f963fSBruce Richardson return -1; 1505ce8d5614SIntel } 15060074d02fSShahaf Shuler /* Apply Rx offloads configuration */ 15070074d02fSShahaf Shuler port->rx_conf.offloads = port->dev_conf.rxmode.offloads; 1508ce8d5614SIntel /* setup rx queues */ 1509ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1510b6ea6408SIntel if ((numa_support) && 1511b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1512b6ea6408SIntel struct rte_mempool * mp = 1513b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1514b6ea6408SIntel if (mp == NULL) { 1515b6ea6408SIntel printf("Failed to setup RX queue:" 1516b6ea6408SIntel "No mempool allocation" 1517b6ea6408SIntel " on the socket %d\n", 1518b6ea6408SIntel rxring_numa[pi]); 1519148f963fSBruce Richardson return -1; 1520b6ea6408SIntel } 1521b6ea6408SIntel 1522b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1523b6ea6408SIntel nb_rxd,rxring_numa[pi], 1524b6ea6408SIntel &(port->rx_conf),mp); 15251e1d6bddSBernard Iremonger } else { 15261e1d6bddSBernard Iremonger struct rte_mempool *mp = 15271e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 15281e1d6bddSBernard Iremonger if (mp == NULL) { 15291e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 15301e1d6bddSBernard Iremonger "No mempool allocation" 15311e1d6bddSBernard Iremonger " on the socket %d\n", 15321e1d6bddSBernard Iremonger port->socket_id); 15331e1d6bddSBernard Iremonger return -1; 1534b6ea6408SIntel } 1535b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1536b6ea6408SIntel nb_rxd,port->socket_id, 15371e1d6bddSBernard Iremonger &(port->rx_conf), mp); 15381e1d6bddSBernard Iremonger } 1539ce8d5614SIntel if (diag == 0) 1540ce8d5614SIntel continue; 1541ce8d5614SIntel 1542ce8d5614SIntel /* Fail to setup rx queue, return */ 1543ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1544ce8d5614SIntel RTE_PORT_HANDLING, 1545ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1546ce8d5614SIntel printf("Port %d can not be set back " 1547ce8d5614SIntel "to stopped\n", pi); 1548ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1549ce8d5614SIntel /* try to reconfigure queues next time */ 1550ce8d5614SIntel port->need_reconfig_queues = 1; 1551148f963fSBruce Richardson return -1; 1552ce8d5614SIntel } 1553ce8d5614SIntel } 155476ad4a2dSGaetan Rivet 155576ad4a2dSGaetan Rivet for (event_type = RTE_ETH_EVENT_UNKNOWN; 155676ad4a2dSGaetan Rivet event_type < RTE_ETH_EVENT_MAX; 155776ad4a2dSGaetan Rivet event_type++) { 155876ad4a2dSGaetan Rivet diag = rte_eth_dev_callback_register(pi, 155976ad4a2dSGaetan Rivet event_type, 156076ad4a2dSGaetan Rivet eth_event_callback, 156176ad4a2dSGaetan 
Rivet NULL); 156276ad4a2dSGaetan Rivet if (diag) { 156376ad4a2dSGaetan Rivet printf("Failed to setup even callback for event %d\n", 156476ad4a2dSGaetan Rivet event_type); 156576ad4a2dSGaetan Rivet return -1; 156676ad4a2dSGaetan Rivet } 156776ad4a2dSGaetan Rivet } 156876ad4a2dSGaetan Rivet 1569ce8d5614SIntel /* start port */ 1570ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1571ce8d5614SIntel printf("Fail to start port %d\n", pi); 1572ce8d5614SIntel 1573ce8d5614SIntel /* Fail to setup rx queue, return */ 1574ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1575ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1576ce8d5614SIntel printf("Port %d can not be set back to " 1577ce8d5614SIntel "stopped\n", pi); 1578ce8d5614SIntel continue; 1579ce8d5614SIntel } 1580ce8d5614SIntel 1581ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1582ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1583ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1584ce8d5614SIntel 15852950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1586d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 15872950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 15882950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 15892950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1590d8c89163SZijie Pan 1591ce8d5614SIntel /* at least one port started, need checking link status */ 1592ce8d5614SIntel need_check_link_status = 1; 1593ce8d5614SIntel } 1594ce8d5614SIntel 159592d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1596edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 159792d2703eSMichael Qiu else if (need_check_link_status == 0) 1598ce8d5614SIntel printf("Please stop the ports first\n"); 1599ce8d5614SIntel 1600ce8d5614SIntel printf("Done\n"); 1601148f963fSBruce Richardson return 0; 1602ce8d5614SIntel } 1603ce8d5614SIntel 1604ce8d5614SIntel void 1605ce8d5614SIntel stop_port(portid_t pid) 1606ce8d5614SIntel { 1607ce8d5614SIntel portid_t pi; 1608ce8d5614SIntel struct rte_port *port; 1609ce8d5614SIntel int need_check_link_status = 0; 1610ce8d5614SIntel 1611ce8d5614SIntel if (dcb_test) { 1612ce8d5614SIntel dcb_test = 0; 1613ce8d5614SIntel dcb_config = 0; 1614ce8d5614SIntel } 16154468635fSMichael Qiu 16164468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 16174468635fSMichael Qiu return; 16184468635fSMichael Qiu 1619ce8d5614SIntel printf("Stopping ports...\n"); 1620ce8d5614SIntel 16217d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 16224468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1623ce8d5614SIntel continue; 1624ce8d5614SIntel 1625a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1626a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1627a8ef3e3aSBernard Iremonger continue; 1628a8ef3e3aSBernard Iremonger } 1629a8ef3e3aSBernard Iremonger 16300e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 16310e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 16320e545d30SBernard Iremonger continue; 16330e545d30SBernard Iremonger } 16340e545d30SBernard Iremonger 1635ce8d5614SIntel port = &ports[pi]; 1636ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1637ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1638ce8d5614SIntel continue; 1639ce8d5614SIntel 1640ce8d5614SIntel rte_eth_dev_stop(pi); 
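/*
 * Note (added comment): the port was moved from RTE_PORT_STARTED to
 * RTE_PORT_HANDLING above; now that the device is stopped, the state is
 * atomically published as RTE_PORT_STOPPED. rte_atomic16_cmpset() is used
 * so that a concurrent state change is detected and only reported,
 * never silently overwritten.
 */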
1641ce8d5614SIntel 1642ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1643ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1644ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1645ce8d5614SIntel need_check_link_status = 1; 1646ce8d5614SIntel } 1647bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1648edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1649ce8d5614SIntel 1650ce8d5614SIntel printf("Done\n"); 1651ce8d5614SIntel } 1652ce8d5614SIntel 1653ce8d5614SIntel void 1654ce8d5614SIntel close_port(portid_t pid) 1655ce8d5614SIntel { 1656ce8d5614SIntel portid_t pi; 1657ce8d5614SIntel struct rte_port *port; 1658ce8d5614SIntel 16594468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 16604468635fSMichael Qiu return; 16614468635fSMichael Qiu 1662ce8d5614SIntel printf("Closing ports...\n"); 1663ce8d5614SIntel 16647d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 16654468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1666ce8d5614SIntel continue; 1667ce8d5614SIntel 1668a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1669a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1670a8ef3e3aSBernard Iremonger continue; 1671a8ef3e3aSBernard Iremonger } 1672a8ef3e3aSBernard Iremonger 16730e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 16740e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 16750e545d30SBernard Iremonger continue; 16760e545d30SBernard Iremonger } 16770e545d30SBernard Iremonger 1678ce8d5614SIntel port = &ports[pi]; 1679ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1680d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1681d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1682d4e8ad64SMichael Qiu continue; 1683d4e8ad64SMichael Qiu } 1684d4e8ad64SMichael Qiu 1685d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1686ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1687ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1688ce8d5614SIntel continue; 1689ce8d5614SIntel } 1690ce8d5614SIntel 1691938a184aSAdrien Mazarguil if (port->flow_list) 1692938a184aSAdrien Mazarguil port_flow_flush(pi); 1693ce8d5614SIntel rte_eth_dev_close(pi); 1694ce8d5614SIntel 1695ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1696ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1697b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1698ce8d5614SIntel } 1699ce8d5614SIntel 1700ce8d5614SIntel printf("Done\n"); 1701ce8d5614SIntel } 1702ce8d5614SIntel 1703edab33b1STetsuya Mukawa void 170497f1e196SWei Dai reset_port(portid_t pid) 170597f1e196SWei Dai { 170697f1e196SWei Dai int diag; 170797f1e196SWei Dai portid_t pi; 170897f1e196SWei Dai struct rte_port *port; 170997f1e196SWei Dai 171097f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 171197f1e196SWei Dai return; 171297f1e196SWei Dai 171397f1e196SWei Dai printf("Resetting ports...\n"); 171497f1e196SWei Dai 171597f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 171697f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 171797f1e196SWei Dai continue; 171897f1e196SWei Dai 171997f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 172097f1e196SWei Dai printf("Please remove port %d from forwarding " 172197f1e196SWei Dai "configuration.\n", pi); 172297f1e196SWei Dai continue; 172397f1e196SWei Dai } 172497f1e196SWei Dai 
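/*
 * Note (added comment): as in stop_port()/close_port(), a port that is
 * still enslaved to a bonding device must be removed from the bond
 * before it can be reset, so such ports are skipped by the check below.
 */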
172597f1e196SWei Dai if (port_is_bonding_slave(pi)) { 172697f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 172797f1e196SWei Dai pi); 172897f1e196SWei Dai continue; 172997f1e196SWei Dai } 173097f1e196SWei Dai 173197f1e196SWei Dai diag = rte_eth_dev_reset(pi); 173297f1e196SWei Dai if (diag == 0) { 173397f1e196SWei Dai port = &ports[pi]; 173497f1e196SWei Dai port->need_reconfig = 1; 173597f1e196SWei Dai port->need_reconfig_queues = 1; 173697f1e196SWei Dai } else { 173797f1e196SWei Dai printf("Failed to reset port %d. diag=%d\n", pi, diag); 173897f1e196SWei Dai } 173997f1e196SWei Dai } 174097f1e196SWei Dai 174197f1e196SWei Dai printf("Done\n"); 174297f1e196SWei Dai } 174397f1e196SWei Dai 174497f1e196SWei Dai void 1745edab33b1STetsuya Mukawa attach_port(char *identifier) 1746ce8d5614SIntel { 1747ebf5e9b7SBernard Iremonger portid_t pi = 0; 1748931126baSBernard Iremonger unsigned int socket_id; 1749ce8d5614SIntel 1750edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1751edab33b1STetsuya Mukawa 1752edab33b1STetsuya Mukawa if (identifier == NULL) { 1753edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1754edab33b1STetsuya Mukawa return; 1755ce8d5614SIntel } 1756ce8d5614SIntel 1757edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1758edab33b1STetsuya Mukawa return; 1759edab33b1STetsuya Mukawa 1760931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1761931126baSBernard Iremonger /* if socket_id is invalid, set to 0 */ 1762931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 1763931126baSBernard Iremonger socket_id = 0; 1764931126baSBernard Iremonger reconfig(pi, socket_id); 1765edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1766edab33b1STetsuya Mukawa 1767edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1768edab33b1STetsuya Mukawa 1769edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1770edab33b1STetsuya Mukawa 1771edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1772edab33b1STetsuya Mukawa printf("Done\n"); 1773edab33b1STetsuya Mukawa } 1774edab33b1STetsuya Mukawa 1775edab33b1STetsuya Mukawa void 177628caa76aSZhiyong Yang detach_port(portid_t port_id) 17775f4ec54fSChen Jing D(Mark) { 1778edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 17795f4ec54fSChen Jing D(Mark) 1780edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 17815f4ec54fSChen Jing D(Mark) 1782edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1783edab33b1STetsuya Mukawa printf("Please close port first\n"); 1784edab33b1STetsuya Mukawa return; 1785edab33b1STetsuya Mukawa } 1786edab33b1STetsuya Mukawa 1787938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 1788938a184aSAdrien Mazarguil port_flow_flush(port_id); 1789938a184aSAdrien Mazarguil 17903070419eSGaetan Rivet if (rte_eth_dev_detach(port_id, name)) { 1791285fd101SOlivier Matz TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); 1792edab33b1STetsuya Mukawa return; 17933070419eSGaetan Rivet } 1794edab33b1STetsuya Mukawa 1795edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1796edab33b1STetsuya Mukawa 1797edab33b1STetsuya Mukawa printf("Port '%s' is detached. 
Now total ports is %d\n", 1798edab33b1STetsuya Mukawa name, nb_ports); 1799edab33b1STetsuya Mukawa printf("Done\n"); 1800edab33b1STetsuya Mukawa return; 18015f4ec54fSChen Jing D(Mark) } 18025f4ec54fSChen Jing D(Mark) 1803af75078fSIntel void 1804af75078fSIntel pmd_test_exit(void) 1805af75078fSIntel { 1806af75078fSIntel portid_t pt_id; 1807af75078fSIntel 18088210ec25SPablo de Lara if (test_done == 0) 18098210ec25SPablo de Lara stop_packet_forwarding(); 18108210ec25SPablo de Lara 1811d3a274ceSZhihong Wang if (ports != NULL) { 1812d3a274ceSZhihong Wang no_link_check = 1; 18137d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 1814d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1815af75078fSIntel fflush(stdout); 1816d3a274ceSZhihong Wang stop_port(pt_id); 1817d3a274ceSZhihong Wang close_port(pt_id); 1818af75078fSIntel } 1819d3a274ceSZhihong Wang } 1820d3a274ceSZhihong Wang printf("\nBye...\n"); 1821af75078fSIntel } 1822af75078fSIntel 1823af75078fSIntel typedef void (*cmd_func_t)(void); 1824af75078fSIntel struct pmd_test_command { 1825af75078fSIntel const char *cmd_name; 1826af75078fSIntel cmd_func_t cmd_func; 1827af75078fSIntel }; 1828af75078fSIntel 1829af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1830af75078fSIntel 1831ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1832af75078fSIntel static void 1833edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 1834af75078fSIntel { 1835ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1836ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1837f8244c63SZhiyong Yang portid_t portid; 1838f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 1839ce8d5614SIntel struct rte_eth_link link; 1840ce8d5614SIntel 1841ce8d5614SIntel printf("Checking link statuses...\n"); 1842ce8d5614SIntel fflush(stdout); 1843ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1844ce8d5614SIntel all_ports_up = 1; 18457d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 1846ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1847ce8d5614SIntel continue; 1848ce8d5614SIntel memset(&link, 0, sizeof(link)); 1849ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1850ce8d5614SIntel /* print link status if flag set */ 1851ce8d5614SIntel if (print_flag == 1) { 1852ce8d5614SIntel if (link.link_status) 1853f8244c63SZhiyong Yang printf( 1854f8244c63SZhiyong Yang "Port%d Link Up. speed %u Mbps- %s\n", 1855f8244c63SZhiyong Yang portid, link.link_speed, 1856ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
1857ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 1858ce8d5614SIntel else 1859f8244c63SZhiyong Yang printf("Port %d Link Down\n", portid); 1860ce8d5614SIntel continue; 1861ce8d5614SIntel } 1862ce8d5614SIntel /* clear all_ports_up flag if any link down */ 186309419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 1864ce8d5614SIntel all_ports_up = 0; 1865ce8d5614SIntel break; 1866ce8d5614SIntel } 1867ce8d5614SIntel } 1868ce8d5614SIntel /* after finally printing all link status, get out */ 1869ce8d5614SIntel if (print_flag == 1) 1870ce8d5614SIntel break; 1871ce8d5614SIntel 1872ce8d5614SIntel if (all_ports_up == 0) { 1873ce8d5614SIntel fflush(stdout); 1874ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1875ce8d5614SIntel } 1876ce8d5614SIntel 1877ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1878ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1879ce8d5614SIntel print_flag = 1; 1880ce8d5614SIntel } 18818ea656f8SGaetan Rivet 18828ea656f8SGaetan Rivet if (lsc_interrupt) 18838ea656f8SGaetan Rivet break; 1884ce8d5614SIntel } 1885af75078fSIntel } 1886af75078fSIntel 1887284c908cSGaetan Rivet static void 1888284c908cSGaetan Rivet rmv_event_callback(void *arg) 1889284c908cSGaetan Rivet { 1890284c908cSGaetan Rivet struct rte_eth_dev *dev; 189128caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 1892284c908cSGaetan Rivet 1893284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 1894284c908cSGaetan Rivet dev = &rte_eth_devices[port_id]; 1895284c908cSGaetan Rivet 1896284c908cSGaetan Rivet stop_port(port_id); 1897284c908cSGaetan Rivet close_port(port_id); 1898f3a1188cSGaetan Rivet printf("removing device %s\n", dev->device->name); 18993070419eSGaetan Rivet if (rte_eal_dev_detach(dev->device)) 1900285fd101SOlivier Matz TESTPMD_LOG(ERR, "Failed to detach device %s\n", 19013070419eSGaetan Rivet dev->device->name); 1902284c908cSGaetan Rivet } 1903284c908cSGaetan Rivet 190476ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 1905d6af1a13SBernard Iremonger static int 1906f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 1907d6af1a13SBernard Iremonger void *ret_param) 190876ad4a2dSGaetan Rivet { 190976ad4a2dSGaetan Rivet static const char * const event_desc[] = { 191076ad4a2dSGaetan Rivet [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 191176ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_LSC] = "LSC", 191276ad4a2dSGaetan Rivet [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 191376ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 191476ad4a2dSGaetan Rivet [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 191576ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MACSEC] = "MACsec", 191676ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 191776ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 191876ad4a2dSGaetan Rivet }; 191976ad4a2dSGaetan Rivet 192076ad4a2dSGaetan Rivet RTE_SET_USED(param); 1921d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 192276ad4a2dSGaetan Rivet 192376ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 192476ad4a2dSGaetan Rivet fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 192576ad4a2dSGaetan Rivet port_id, __func__, type); 192676ad4a2dSGaetan Rivet fflush(stderr); 19273af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 192876ad4a2dSGaetan Rivet printf("\nPort %" PRIu8 ": %s event\n", port_id, 192976ad4a2dSGaetan Rivet event_desc[type]); 193076ad4a2dSGaetan Rivet fflush(stdout); 193176ad4a2dSGaetan Rivet } 
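/*
 * Note (added comment): device removal (RTE_ETH_EVENT_INTR_RMV) is not
 * handled inline; tearing the port down from inside its own event
 * callback would not be safe, so rmv_event_callback() is deferred to the
 * EAL alarm thread (about 100 ms later) in the switch below, where it
 * stops, closes and detaches the port.
 */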
1932284c908cSGaetan Rivet 1933284c908cSGaetan Rivet switch (type) { 1934284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 1935284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 1936284c908cSGaetan Rivet rmv_event_callback, (void *)(intptr_t)port_id)) 1937284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 1938284c908cSGaetan Rivet break; 1939284c908cSGaetan Rivet default: 1940284c908cSGaetan Rivet break; 1941284c908cSGaetan Rivet } 1942d6af1a13SBernard Iremonger return 0; 194376ad4a2dSGaetan Rivet } 194476ad4a2dSGaetan Rivet 1945013af9b6SIntel static int 194628caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1947af75078fSIntel { 1948013af9b6SIntel uint16_t i; 1949af75078fSIntel int diag; 1950013af9b6SIntel uint8_t mapping_found = 0; 1951af75078fSIntel 1952013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1953013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1954013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1955013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1956013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1957013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1958013af9b6SIntel if (diag != 0) 1959013af9b6SIntel return diag; 1960013af9b6SIntel mapping_found = 1; 1961af75078fSIntel } 1962013af9b6SIntel } 1963013af9b6SIntel if (mapping_found) 1964013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1965013af9b6SIntel return 0; 1966013af9b6SIntel } 1967013af9b6SIntel 1968013af9b6SIntel static int 196928caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1970013af9b6SIntel { 1971013af9b6SIntel uint16_t i; 1972013af9b6SIntel int diag; 1973013af9b6SIntel uint8_t mapping_found = 0; 1974013af9b6SIntel 1975013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1976013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1977013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1978013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1979013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1980013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1981013af9b6SIntel if (diag != 0) 1982013af9b6SIntel return diag; 1983013af9b6SIntel mapping_found = 1; 1984013af9b6SIntel } 1985013af9b6SIntel } 1986013af9b6SIntel if (mapping_found) 1987013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 1988013af9b6SIntel return 0; 1989013af9b6SIntel } 1990013af9b6SIntel 1991013af9b6SIntel static void 199228caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 1993013af9b6SIntel { 1994013af9b6SIntel int diag = 0; 1995013af9b6SIntel 1996013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1997af75078fSIntel if (diag != 0) { 1998013af9b6SIntel if (diag == -ENOTSUP) { 1999013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 2000013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 2001013af9b6SIntel } 2002013af9b6SIntel else 2003013af9b6SIntel rte_exit(EXIT_FAILURE, 2004013af9b6SIntel "set_tx_queue_stats_mapping_registers " 2005013af9b6SIntel "failed for port id=%d diag=%d\n", 2006af75078fSIntel pi, diag); 2007af75078fSIntel } 2008013af9b6SIntel 2009013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 2010af75078fSIntel if (diag != 0) { 2011013af9b6SIntel if (diag == -ENOTSUP) { 2012013af9b6SIntel port->rx_queue_stats_mapping_enabled 
= 0; 2013013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 2014013af9b6SIntel } 2015013af9b6SIntel else 2016013af9b6SIntel rte_exit(EXIT_FAILURE, 2017013af9b6SIntel "set_rx_queue_stats_mapping_registers " 2018013af9b6SIntel "failed for port id=%d diag=%d\n", 2019af75078fSIntel pi, diag); 2020af75078fSIntel } 2021af75078fSIntel } 2022af75078fSIntel 2023f2c5125aSPablo de Lara static void 2024f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 2025f2c5125aSPablo de Lara { 2026f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 2027f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 2028f2c5125aSPablo de Lara 2029f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 2030f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2031f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2032f2c5125aSPablo de Lara 2033f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2034f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2035f2c5125aSPablo de Lara 2036f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2037f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2038f2c5125aSPablo de Lara 2039f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2040f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 2041f2c5125aSPablo de Lara 2042f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2043f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 2044f2c5125aSPablo de Lara 2045f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2046f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2047f2c5125aSPablo de Lara 2048f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2049f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2050f2c5125aSPablo de Lara 2051f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2052f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2053f2c5125aSPablo de Lara 2054f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2055f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2056f2c5125aSPablo de Lara 2057f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2058f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 2059f2c5125aSPablo de Lara 2060f2c5125aSPablo de Lara if (txq_flags != RTE_PMD_PARAM_UNSET) 2061f2c5125aSPablo de Lara port->tx_conf.txq_flags = txq_flags; 2062f2c5125aSPablo de Lara } 2063f2c5125aSPablo de Lara 2064013af9b6SIntel void 2065013af9b6SIntel init_port_config(void) 2066013af9b6SIntel { 2067013af9b6SIntel portid_t pid; 2068013af9b6SIntel struct rte_port *port; 2069013af9b6SIntel 20707d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2071013af9b6SIntel port = &ports[pid]; 2072013af9b6SIntel port->dev_conf.rxmode = rx_mode; 2073013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 20743ce690d3SBruce Richardson if (nb_rxq > 1) { 2075013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2076013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2077af75078fSIntel } else { 2078013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2079013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2080af75078fSIntel } 20813ce690d3SBruce Richardson 20825f592039SJingjing Wu if (port->dcb_flag == 0) { 20833ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 
20843ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 20853ce690d3SBruce Richardson else 20863ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 20873ce690d3SBruce Richardson } 20883ce690d3SBruce Richardson 2089f2c5125aSPablo de Lara rxtx_port_config(port); 2090013af9b6SIntel 2091013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2092013af9b6SIntel 2093013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 209450c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2095e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 20967b7e5ba7SIntel #endif 20978ea656f8SGaetan Rivet 20988ea656f8SGaetan Rivet if (lsc_interrupt && 20998ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 21008ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 21018ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2102284c908cSGaetan Rivet if (rmv_interrupt && 2103284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2104284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2105284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 21065b590fbeSJasvinder Singh 21075b590fbeSJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 21085b590fbeSJasvinder Singh /* Detect softnic port */ 21095b590fbeSJasvinder Singh if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 21105b590fbeSJasvinder Singh port->softnic_enable = 1; 21115b590fbeSJasvinder Singh memset(&port->softport, 0, sizeof(struct softnic_port)); 21125b590fbeSJasvinder Singh 21135b590fbeSJasvinder Singh if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 21145b590fbeSJasvinder Singh port->softport.tm_flag = 1; 21155b590fbeSJasvinder Singh } 21165b590fbeSJasvinder Singh #endif 2117013af9b6SIntel } 2118013af9b6SIntel } 2119013af9b6SIntel 212041b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 212141b05095SBernard Iremonger { 212241b05095SBernard Iremonger struct rte_port *port; 212341b05095SBernard Iremonger 212441b05095SBernard Iremonger port = &ports[slave_pid]; 212541b05095SBernard Iremonger port->slave_flag = 1; 212641b05095SBernard Iremonger } 212741b05095SBernard Iremonger 212841b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 212941b05095SBernard Iremonger { 213041b05095SBernard Iremonger struct rte_port *port; 213141b05095SBernard Iremonger 213241b05095SBernard Iremonger port = &ports[slave_pid]; 213341b05095SBernard Iremonger port->slave_flag = 0; 213441b05095SBernard Iremonger } 213541b05095SBernard Iremonger 21360e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 21370e545d30SBernard Iremonger { 21380e545d30SBernard Iremonger struct rte_port *port; 21390e545d30SBernard Iremonger 21400e545d30SBernard Iremonger port = &ports[slave_pid]; 21410e545d30SBernard Iremonger return port->slave_flag; 21420e545d30SBernard Iremonger } 21430e545d30SBernard Iremonger 2144013af9b6SIntel const uint16_t vlan_tags[] = { 2145013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2146013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2147013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2148013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 2149013af9b6SIntel }; 2150013af9b6SIntel 2151013af9b6SIntel static int 21521a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 21531a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 21541a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 21551a572499SJingjing Wu uint8_t pfc_en) 2156013af9b6SIntel { 2157013af9b6SIntel uint8_t i; 2158af75078fSIntel 
2159af75078fSIntel /* 2160013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2161013af9b6SIntel * given above, and the number of traffic classes available for use. 2162af75078fSIntel */ 21631a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 21641a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 21651a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 21661a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 21671a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2168013af9b6SIntel 2169547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 21701a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 21711a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 21721a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 21731a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 21741a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 21751a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2176013af9b6SIntel 21771a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 21781a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 21791a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 21801a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 21811a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2182af75078fSIntel } 2183013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2184f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2185f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2186013af9b6SIntel } 2187013af9b6SIntel 2188013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 218932e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 219032e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 21911a572499SJingjing Wu } else { 21921a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 21931a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 21941a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 21951a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2196013af9b6SIntel 21971a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 21981a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 21991a572499SJingjing Wu 2200bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2201bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2202bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2203013af9b6SIntel } 22041a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 22051a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 220632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 22071a572499SJingjing Wu } 22081a572499SJingjing Wu 22091a572499SJingjing Wu if (pfc_en) 22101a572499SJingjing Wu eth_conf->dcb_capability_en = 22111a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2212013af9b6SIntel else 2213013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2214013af9b6SIntel 2215013af9b6SIntel return 0; 2216013af9b6SIntel } 2217013af9b6SIntel 2218013af9b6SIntel int 22191a572499SJingjing Wu init_port_dcb_config(portid_t pid, 22201a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 22211a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 22221a572499SJingjing Wu uint8_t pfc_en) 2223013af9b6SIntel { 2224013af9b6SIntel struct rte_eth_conf port_conf; 2225013af9b6SIntel struct rte_port *rte_port; 2226013af9b6SIntel int retval; 2227013af9b6SIntel uint16_t i; 2228013af9b6SIntel 22292a977b89SWenzhuo Lu 
rte_port = &ports[pid]; 2230013af9b6SIntel 2231013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2232013af9b6SIntel /* Enter DCB configuration status */ 2233013af9b6SIntel dcb_config = 1; 2234013af9b6SIntel 2235013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 22361a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2237013af9b6SIntel if (retval < 0) 2238013af9b6SIntel return retval; 22390074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2240013af9b6SIntel 22412a977b89SWenzhuo Lu /** 22422a977b89SWenzhuo Lu * Write the configuration into the device. 22432a977b89SWenzhuo Lu * Set the numbers of RX & TX queues to 0, so 22442a977b89SWenzhuo Lu * the RX & TX queues will not be setup. 22452a977b89SWenzhuo Lu */ 2246c947ef89SStephen Hemminger rte_eth_dev_configure(pid, 0, 0, &port_conf); 22472a977b89SWenzhuo Lu 22482a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 22492a977b89SWenzhuo Lu 22502a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 22512a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 22522a977b89SWenzhuo Lu */ 22532a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 22542a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 22552a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 22562a977b89SWenzhuo Lu " for port %d.", pid); 22572a977b89SWenzhuo Lu return -1; 22582a977b89SWenzhuo Lu } 22592a977b89SWenzhuo Lu 22602a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 22612a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 22622a977b89SWenzhuo Lu */ 22632a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 226486ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 226586ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 226686ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 226786ef65eeSBernard Iremonger } else { 22682a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 22692a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 227086ef65eeSBernard Iremonger } 22712a977b89SWenzhuo Lu } else { 22722a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 22732a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 22742a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 22752a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 22762a977b89SWenzhuo Lu } else { 22772a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 22782a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 22792a977b89SWenzhuo Lu 22802a977b89SWenzhuo Lu } 22812a977b89SWenzhuo Lu } 22822a977b89SWenzhuo Lu rx_free_thresh = 64; 22832a977b89SWenzhuo Lu 2284013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2285013af9b6SIntel 2286f2c5125aSPablo de Lara rxtx_port_config(rte_port); 2287013af9b6SIntel /* VLAN filter */ 22880074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 22891a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 2290013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 2291013af9b6SIntel 2292013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2293013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 2294013af9b6SIntel 22957741e4cfSIntel rte_port->dcb_flag = 1; 22967741e4cfSIntel 2297013af9b6SIntel return 0; 2298af75078fSIntel } 2299af75078fSIntel 2300ffc468ffSTetsuya Mukawa static void 
2301ffc468ffSTetsuya Mukawa init_port(void) 2302ffc468ffSTetsuya Mukawa { 2303ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. */ 2304ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 2305ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2306ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 2307ffc468ffSTetsuya Mukawa if (ports == NULL) { 2308ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2309ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2310ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2311ffc468ffSTetsuya Mukawa } 2312ffc468ffSTetsuya Mukawa } 2313ffc468ffSTetsuya Mukawa 2314d3a274ceSZhihong Wang static void 2315d3a274ceSZhihong Wang force_quit(void) 2316d3a274ceSZhihong Wang { 2317d3a274ceSZhihong Wang pmd_test_exit(); 2318d3a274ceSZhihong Wang prompt_exit(); 2319d3a274ceSZhihong Wang } 2320d3a274ceSZhihong Wang 2321d3a274ceSZhihong Wang static void 2322cfea1f30SPablo de Lara print_stats(void) 2323cfea1f30SPablo de Lara { 2324cfea1f30SPablo de Lara uint8_t i; 2325cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 2326cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2327cfea1f30SPablo de Lara 2328cfea1f30SPablo de Lara /* Clear screen and move to top left */ 2329cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 2330cfea1f30SPablo de Lara 2331cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 2332cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2333cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 2334cfea1f30SPablo de Lara } 2335cfea1f30SPablo de Lara 2336cfea1f30SPablo de Lara static void 2337d3a274ceSZhihong Wang signal_handler(int signum) 2338d3a274ceSZhihong Wang { 2339d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 2340d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 2341d3a274ceSZhihong Wang signum); 2342102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 2343102b7329SReshma Pattan /* uninitialize packet capture framework */ 2344102b7329SReshma Pattan rte_pdump_uninit(); 2345102b7329SReshma Pattan #endif 234662d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 234762d3216dSReshma Pattan rte_latencystats_uninit(); 234862d3216dSReshma Pattan #endif 2349d3a274ceSZhihong Wang force_quit(); 2350d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
*/ 2351d9a191a0SPhil Yang f_quit = 1; 2352d3a274ceSZhihong Wang /* exit with the expected status */ 2353d3a274ceSZhihong Wang signal(signum, SIG_DFL); 2354d3a274ceSZhihong Wang kill(getpid(), signum); 2355d3a274ceSZhihong Wang } 2356d3a274ceSZhihong Wang } 2357d3a274ceSZhihong Wang 2358af75078fSIntel int 2359af75078fSIntel main(int argc, char** argv) 2360af75078fSIntel { 2361af75078fSIntel int diag; 2362f8244c63SZhiyong Yang portid_t port_id; 2363af75078fSIntel 2364d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 2365d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 2366d3a274ceSZhihong Wang 2367af75078fSIntel diag = rte_eal_init(argc, argv); 2368af75078fSIntel if (diag < 0) 2369af75078fSIntel rte_panic("Cannot init EAL\n"); 2370af75078fSIntel 2371285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 2372285fd101SOlivier Matz if (testpmd_logtype < 0) 2373285fd101SOlivier Matz rte_panic("Cannot register log type"); 2374285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 2375285fd101SOlivier Matz 23761c036b16SEelco Chaudron if (mlockall(MCL_CURRENT | MCL_FUTURE)) { 2377285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 23781c036b16SEelco Chaudron strerror(errno)); 23791c036b16SEelco Chaudron } 23801c036b16SEelco Chaudron 2381102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 2382102b7329SReshma Pattan /* initialize packet capture framework */ 2383102b7329SReshma Pattan rte_pdump_init(NULL); 2384102b7329SReshma Pattan #endif 2385102b7329SReshma Pattan 2386af75078fSIntel nb_ports = (portid_t) rte_eth_dev_count(); 2387af75078fSIntel if (nb_ports == 0) 2388285fd101SOlivier Matz TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 2389af75078fSIntel 2390ffc468ffSTetsuya Mukawa /* allocate port structures, and init them */ 2391ffc468ffSTetsuya Mukawa init_port(); 2392ffc468ffSTetsuya Mukawa 2393af75078fSIntel set_def_fwd_config(); 2394af75078fSIntel if (nb_lcores == 0) 2395af75078fSIntel rte_panic("Empty set of forwarding logical cores - check the " 2396af75078fSIntel "core mask supplied in the command parameters\n"); 2397af75078fSIntel 239865eb1e54SPablo de Lara /* Bitrate/latency stats disabled by default */ 239930bcc68cSPablo de Lara #ifdef RTE_LIBRTE_BITRATE 2400e25e6c70SRemy Horton bitrate_enabled = 0; 240130bcc68cSPablo de Lara #endif 240265eb1e54SPablo de Lara #ifdef RTE_LIBRTE_LATENCY_STATS 240365eb1e54SPablo de Lara latencystats_enabled = 0; 240465eb1e54SPablo de Lara #endif 2405e25e6c70SRemy Horton 2406af75078fSIntel argc -= diag; 2407af75078fSIntel argv += diag; 2408af75078fSIntel if (argc > 1) 2409af75078fSIntel launch_args_parse(argc, argv); 2410af75078fSIntel 241199cabef0SPablo de Lara if (tx_first && interactive) 241299cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 241399cabef0SPablo de Lara "interactive mode.\n"); 24148820cba4SDavid Hunt 24158820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 24168820cba4SDavid Hunt printf("Warning: lsc_interrupt needs to be off when " 24178820cba4SDavid Hunt " using tx_first. 
Disabling.\n"); 24188820cba4SDavid Hunt lsc_interrupt = 0; 24198820cba4SDavid Hunt } 24208820cba4SDavid Hunt 24215a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 24225a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 24235a8fb55cSReshma Pattan 24245a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 2425af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 2426af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 2427af75078fSIntel nb_rxq, nb_txq); 2428af75078fSIntel 2429af75078fSIntel init_config(); 2430148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 2431148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2432af75078fSIntel 2433ce8d5614SIntel /* set all ports to promiscuous mode by default */ 24347d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(port_id) 2435ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 2436af75078fSIntel 24377e4441c8SRemy Horton /* Init metrics library */ 24387e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 24397e4441c8SRemy Horton 244062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 244162d3216dSReshma Pattan if (latencystats_enabled != 0) { 244262d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 244362d3216dSReshma Pattan if (ret) 244462d3216dSReshma Pattan printf("Warning: latencystats init()" 244562d3216dSReshma Pattan " returned error %d\n", ret); 244662d3216dSReshma Pattan printf("Latencystats running on lcore %d\n", 244762d3216dSReshma Pattan latencystats_lcore_id); 244862d3216dSReshma Pattan } 244962d3216dSReshma Pattan #endif 245062d3216dSReshma Pattan 24517e4441c8SRemy Horton /* Setup bitrate stats */ 24527e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 2453e25e6c70SRemy Horton if (bitrate_enabled != 0) { 24547e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 24557e4441c8SRemy Horton if (bitrate_data == NULL) 2456e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 2457e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 24587e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 2459e25e6c70SRemy Horton } 24607e4441c8SRemy Horton #endif 24617e4441c8SRemy Horton 24620d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 246381ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 246481ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 246581ef862bSAllain Legacy 2466ca7feb22SCyril Chemparathy if (interactive == 1) { 2467ca7feb22SCyril Chemparathy if (auto_start) { 2468ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 2469ca7feb22SCyril Chemparathy start_packet_forwarding(0); 2470ca7feb22SCyril Chemparathy } 2471af75078fSIntel prompt(); 24720de738cfSJiayu Hu pmd_test_exit(); 2473ca7feb22SCyril Chemparathy } else 24740d56cb81SThomas Monjalon #endif 24750d56cb81SThomas Monjalon { 2476af75078fSIntel char c; 2477af75078fSIntel int rc; 2478af75078fSIntel 2479d9a191a0SPhil Yang f_quit = 0; 2480d9a191a0SPhil Yang 2481af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 248299cabef0SPablo de Lara start_packet_forwarding(tx_first); 2483cfea1f30SPablo de Lara if (stats_period != 0) { 2484cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 2485cfea1f30SPablo de Lara uint64_t timer_period; 2486cfea1f30SPablo de Lara 2487cfea1f30SPablo de Lara /* Convert to number of cycles */ 2488cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 2489cfea1f30SPablo de Lara 2490d9a191a0SPhil Yang while (f_quit == 0) { 2491cfea1f30SPablo de Lara cur_time = 
rte_get_timer_cycles(); 2492cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 2493cfea1f30SPablo de Lara 2494cfea1f30SPablo de Lara if (diff_time >= timer_period) { 2495cfea1f30SPablo de Lara print_stats(); 2496cfea1f30SPablo de Lara /* Reset the timer */ 2497cfea1f30SPablo de Lara diff_time = 0; 2498cfea1f30SPablo de Lara } 2499cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 2500cfea1f30SPablo de Lara prev_time = cur_time; 2501cfea1f30SPablo de Lara sleep(1); 2502cfea1f30SPablo de Lara } 2503cfea1f30SPablo de Lara } 2504cfea1f30SPablo de Lara 2505af75078fSIntel printf("Press enter to exit\n"); 2506af75078fSIntel rc = read(0, &c, 1); 2507d3a274ceSZhihong Wang pmd_test_exit(); 2508af75078fSIntel if (rc < 0) 2509af75078fSIntel return 1; 2510af75078fSIntel } 2511af75078fSIntel 2512af75078fSIntel return 0; 2513af75078fSIntel } 2514
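For reference, a minimal sketch (not part of testpmd) of the ethdev bring-up sequence that start_port() wraps: configure the port, set up one RX and one TX queue, register an event callback and start the device. Error handling, the port_status state machine and the NUMA-aware mempool selection used above are omitted; the function name example_port_bringup, the mb_pool argument, the descriptor count and the single-queue layout are illustrative placeholders, and the headers already included at the top of this file are assumed.

static int
example_port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	/* default device configuration: single queue, no RSS, no offloads */
	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* one RX and one TX queue on the port's own NUMA socket,
	 * default (NULL) queue thresholds */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	/* register a callback for link state changes, as start_port()
	 * does for every event type */
	ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
			eth_event_callback, NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}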