/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated normally. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * When set, avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * When set, avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask Ethernet events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
		     DEV_RX_OFFLOAD_VLAN_STRIP |
		     DEV_RX_OFFLOAD_CRC_STRIP),
	.ignore_offload_bitfield = 1,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket id is new (not yet discovered).
 * Return a positive value if the socket id has not been seen yet,
 * zero if it is already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
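 * set_default_fwd_lcores_config() records the NUMA sockets found across all
 * lcore ids and selects every enabled lcore, except the master, as a
 * forwarding lcore.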
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
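 * mbuf_pool_create() builds the mbuf pool for one socket; when mp_anon is
 * set, the pool is populated from anonymous (not necessarily physically
 * contiguous) memory.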
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
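	 *
	 * The default pool size covers the maximum number of RX and TX
	 * descriptors per queue, plus one mempool cache per lcore and one
	 * extra burst, scaled by RTE_MAX_ETHPORTS; param_total_num_mbufs
	 * (set via --total-num-mbufs) overrides this computation.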
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
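	 * Bursts of any other size are reported only as a percentage of
	 * "others" when the statistics are displayed.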
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets. A timer is therefore used to exit
				 * the loop once 1 second has elapsed.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
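 * start_packet_forwarding() validates the queue configuration for the
 * selected engine, optionally flushes stale RX packets and sends the
 * initial tx_first bursts, then launches the forwarding lcores.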
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

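		/* Also clear the optional per-stream burst and CPU-cycle
		 * counters when they are compiled in.
		 */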
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1246af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1247af75078fSIntel } else { 1248af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = 1249af75078fSIntel fwd_streams[sm_id]; 1250af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = 1251af75078fSIntel fwd_streams[sm_id]; 1252af75078fSIntel } 1253af75078fSIntel tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1254af75078fSIntel tx_dropped = (uint64_t) (tx_dropped + 1255af75078fSIntel fwd_streams[sm_id]->fwd_dropped); 1256af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1257af75078fSIntel 1258013af9b6SIntel rx_bad_ip_csum = 1259013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1260af75078fSIntel rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1261af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum); 1262013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1263013af9b6SIntel rx_bad_ip_csum; 1264af75078fSIntel 1265013af9b6SIntel rx_bad_l4_csum = 1266013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1267af75078fSIntel rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1268af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum); 1269013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1270013af9b6SIntel rx_bad_l4_csum; 1271af75078fSIntel 1272af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1273af75078fSIntel fwd_cycles = (uint64_t) (fwd_cycles + 1274af75078fSIntel fwd_streams[sm_id]->core_cycles); 1275af75078fSIntel #endif 1276af75078fSIntel } 1277af75078fSIntel total_recv = 0; 1278af75078fSIntel total_xmit = 0; 1279af75078fSIntel total_rx_dropped = 0; 1280af75078fSIntel total_tx_dropped = 0; 1281af75078fSIntel total_rx_nombuf = 0; 12827741e4cfSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1283af75078fSIntel pt_id = fwd_ports_ids[i]; 1284af75078fSIntel 1285af75078fSIntel port = &ports[pt_id]; 1286af75078fSIntel rte_eth_stats_get(pt_id, &stats); 1287af75078fSIntel stats.ipackets -= port->stats.ipackets; 1288af75078fSIntel port->stats.ipackets = 0; 1289af75078fSIntel stats.opackets -= port->stats.opackets; 1290af75078fSIntel port->stats.opackets = 0; 1291af75078fSIntel stats.ibytes -= port->stats.ibytes; 1292af75078fSIntel port->stats.ibytes = 0; 1293af75078fSIntel stats.obytes -= port->stats.obytes; 1294af75078fSIntel port->stats.obytes = 0; 129570bdb186SIvan Boule stats.imissed -= port->stats.imissed; 129670bdb186SIvan Boule port->stats.imissed = 0; 1297af75078fSIntel stats.oerrors -= port->stats.oerrors; 1298af75078fSIntel port->stats.oerrors = 0; 1299af75078fSIntel stats.rx_nombuf -= port->stats.rx_nombuf; 1300af75078fSIntel port->stats.rx_nombuf = 0; 1301af75078fSIntel 1302af75078fSIntel total_recv += stats.ipackets; 1303af75078fSIntel total_xmit += stats.opackets; 130470bdb186SIvan Boule total_rx_dropped += stats.imissed; 1305af75078fSIntel total_tx_dropped += port->tx_dropped; 1306af75078fSIntel total_rx_nombuf += stats.rx_nombuf; 1307af75078fSIntel 1308af75078fSIntel fwd_port_stats_display(pt_id, &stats); 1309af75078fSIntel } 1310b7091f1dSJiayu Hu 1311af75078fSIntel printf("\n %s Accumulated forward statistics for all ports" 1312af75078fSIntel "%s\n", 1313af75078fSIntel acc_stats_border, acc_stats_border); 1314af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1315af75078fSIntel "%-"PRIu64"\n" 1316af75078fSIntel " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1317af75078fSIntel "%-"PRIu64"\n", 
1318af75078fSIntel total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1319af75078fSIntel total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1320af75078fSIntel if (total_rx_nombuf > 0) 1321af75078fSIntel printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1322af75078fSIntel printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1323af75078fSIntel "%s\n", 1324af75078fSIntel acc_stats_border, acc_stats_border); 1325af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1326af75078fSIntel if (total_recv > 0) 1327af75078fSIntel printf("\n CPU cycles/packet=%u (total cycles=" 1328af75078fSIntel "%"PRIu64" / total RX packets=%"PRIu64")\n", 1329af75078fSIntel (unsigned int)(fwd_cycles / total_recv), 1330af75078fSIntel fwd_cycles, total_recv); 1331af75078fSIntel #endif 1332af75078fSIntel printf("\nDone.\n"); 1333af75078fSIntel test_done = 1; 1334af75078fSIntel } 1335af75078fSIntel 1336cfae07fdSOuyang Changchun void 1337cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1338cfae07fdSOuyang Changchun { 1339492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1340cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1341cfae07fdSOuyang Changchun } 1342cfae07fdSOuyang Changchun 1343cfae07fdSOuyang Changchun void 1344cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1345cfae07fdSOuyang Changchun { 1346492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1347cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1348cfae07fdSOuyang Changchun } 1349cfae07fdSOuyang Changchun 1350ce8d5614SIntel static int 1351ce8d5614SIntel all_ports_started(void) 1352ce8d5614SIntel { 1353ce8d5614SIntel portid_t pi; 1354ce8d5614SIntel struct rte_port *port; 1355ce8d5614SIntel 13567d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1357ce8d5614SIntel port = &ports[pi]; 1358ce8d5614SIntel /* Check if there is a port which is not started */ 135941b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 136041b05095SBernard Iremonger (port->slave_flag == 0)) 1361ce8d5614SIntel return 0; 1362ce8d5614SIntel } 1363ce8d5614SIntel 1364ce8d5614SIntel /* No port is not started */ 1365ce8d5614SIntel return 1; 1366ce8d5614SIntel } 1367ce8d5614SIntel 1368148f963fSBruce Richardson int 1369*6018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 1370*6018eb8cSShahaf Shuler { 1371*6018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 1372*6018eb8cSShahaf Shuler 1373*6018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 1374*6018eb8cSShahaf Shuler (port->slave_flag == 0)) 1375*6018eb8cSShahaf Shuler return 0; 1376*6018eb8cSShahaf Shuler return 1; 1377*6018eb8cSShahaf Shuler } 1378*6018eb8cSShahaf Shuler 1379*6018eb8cSShahaf Shuler int 1380edab33b1STetsuya Mukawa all_ports_stopped(void) 1381edab33b1STetsuya Mukawa { 1382edab33b1STetsuya Mukawa portid_t pi; 1383edab33b1STetsuya Mukawa 13847d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1385*6018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 1386edab33b1STetsuya Mukawa return 0; 1387edab33b1STetsuya Mukawa } 1388edab33b1STetsuya Mukawa 1389edab33b1STetsuya Mukawa return 1; 1390edab33b1STetsuya Mukawa } 1391edab33b1STetsuya Mukawa 1392edab33b1STetsuya Mukawa int 1393edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 1394edab33b1STetsuya Mukawa { 1395edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1396edab33b1STetsuya Mukawa return 0; 1397edab33b1STetsuya Mukawa 1398edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 1399edab33b1STetsuya 
Mukawa return 0; 1400edab33b1STetsuya Mukawa 1401edab33b1STetsuya Mukawa return 1; 1402edab33b1STetsuya Mukawa } 1403edab33b1STetsuya Mukawa 1404edab33b1STetsuya Mukawa static int 1405edab33b1STetsuya Mukawa port_is_closed(portid_t port_id) 1406edab33b1STetsuya Mukawa { 1407edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1408edab33b1STetsuya Mukawa return 0; 1409edab33b1STetsuya Mukawa 1410edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1411edab33b1STetsuya Mukawa return 0; 1412edab33b1STetsuya Mukawa 1413edab33b1STetsuya Mukawa return 1; 1414edab33b1STetsuya Mukawa } 1415edab33b1STetsuya Mukawa 1416edab33b1STetsuya Mukawa int 1417ce8d5614SIntel start_port(portid_t pid) 1418ce8d5614SIntel { 141992d2703eSMichael Qiu int diag, need_check_link_status = -1; 1420ce8d5614SIntel portid_t pi; 1421ce8d5614SIntel queueid_t qi; 1422ce8d5614SIntel struct rte_port *port; 14232950a769SDeclan Doherty struct ether_addr mac_addr; 142476ad4a2dSGaetan Rivet enum rte_eth_event_type event_type; 1425ce8d5614SIntel 14264468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 14274468635fSMichael Qiu return 0; 14284468635fSMichael Qiu 1429ce8d5614SIntel if(dcb_config) 1430ce8d5614SIntel dcb_test = 1; 14317d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1432edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1433ce8d5614SIntel continue; 1434ce8d5614SIntel 143592d2703eSMichael Qiu need_check_link_status = 0; 1436ce8d5614SIntel port = &ports[pi]; 1437ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1438ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1439ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1440ce8d5614SIntel continue; 1441ce8d5614SIntel } 1442ce8d5614SIntel 1443ce8d5614SIntel if (port->need_reconfig > 0) { 1444ce8d5614SIntel port->need_reconfig = 0; 1445ce8d5614SIntel 14467ee3e944SVasily Philipov if (flow_isolate_all) { 14477ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 14487ee3e944SVasily Philipov if (ret) { 14497ee3e944SVasily Philipov printf("Failed to apply isolated" 14507ee3e944SVasily Philipov " mode on port %d\n", pi); 14517ee3e944SVasily Philipov return -1; 14527ee3e944SVasily Philipov } 14537ee3e944SVasily Philipov } 14547ee3e944SVasily Philipov 14555706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 145620a0286fSLiu Xiaofeng port->socket_id); 1457ce8d5614SIntel /* configure port */ 1458ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1459ce8d5614SIntel &(port->dev_conf)); 1460ce8d5614SIntel if (diag != 0) { 1461ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1462ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1463ce8d5614SIntel printf("Port %d can not be set back " 1464ce8d5614SIntel "to stopped\n", pi); 1465ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1466ce8d5614SIntel /* try to reconfigure port next time */ 1467ce8d5614SIntel port->need_reconfig = 1; 1468148f963fSBruce Richardson return -1; 1469ce8d5614SIntel } 1470ce8d5614SIntel } 1471ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1472ce8d5614SIntel port->need_reconfig_queues = 0; 1473ce8d5614SIntel /* setup tx queues */ 1474ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1475b6ea6408SIntel if ((numa_support) && 1476b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1477b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1478b6ea6408SIntel nb_txd,txring_numa[pi], 1479b6ea6408SIntel &(port->tx_conf)); 1480b6ea6408SIntel else 
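/* No per-queue NUMA override configured: allocate the TX ring on the port's own socket. */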
1481b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1482b6ea6408SIntel nb_txd,port->socket_id, 1483b6ea6408SIntel &(port->tx_conf)); 1484b6ea6408SIntel 1485ce8d5614SIntel if (diag == 0) 1486ce8d5614SIntel continue; 1487ce8d5614SIntel 1488ce8d5614SIntel /* Fail to setup tx queue, return */ 1489ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1490ce8d5614SIntel RTE_PORT_HANDLING, 1491ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1492ce8d5614SIntel printf("Port %d can not be set back " 1493ce8d5614SIntel "to stopped\n", pi); 1494ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1495ce8d5614SIntel /* try to reconfigure queues next time */ 1496ce8d5614SIntel port->need_reconfig_queues = 1; 1497148f963fSBruce Richardson return -1; 1498ce8d5614SIntel } 14990074d02fSShahaf Shuler /* Apply Rx offloads configuration */ 15000074d02fSShahaf Shuler port->rx_conf.offloads = port->dev_conf.rxmode.offloads; 1501ce8d5614SIntel /* setup rx queues */ 1502ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1503b6ea6408SIntel if ((numa_support) && 1504b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1505b6ea6408SIntel struct rte_mempool * mp = 1506b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1507b6ea6408SIntel if (mp == NULL) { 1508b6ea6408SIntel printf("Failed to setup RX queue:" 1509b6ea6408SIntel "No mempool allocation" 1510b6ea6408SIntel " on the socket %d\n", 1511b6ea6408SIntel rxring_numa[pi]); 1512148f963fSBruce Richardson return -1; 1513b6ea6408SIntel } 1514b6ea6408SIntel 1515b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1516b6ea6408SIntel nb_rxd,rxring_numa[pi], 1517b6ea6408SIntel &(port->rx_conf),mp); 15181e1d6bddSBernard Iremonger } else { 15191e1d6bddSBernard Iremonger struct rte_mempool *mp = 15201e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 15211e1d6bddSBernard Iremonger if (mp == NULL) { 15221e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 15231e1d6bddSBernard Iremonger "No mempool allocation" 15241e1d6bddSBernard Iremonger " on the socket %d\n", 15251e1d6bddSBernard Iremonger port->socket_id); 15261e1d6bddSBernard Iremonger return -1; 1527b6ea6408SIntel } 1528b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1529b6ea6408SIntel nb_rxd,port->socket_id, 15301e1d6bddSBernard Iremonger &(port->rx_conf), mp); 15311e1d6bddSBernard Iremonger } 1532ce8d5614SIntel if (diag == 0) 1533ce8d5614SIntel continue; 1534ce8d5614SIntel 1535ce8d5614SIntel /* Fail to setup rx queue, return */ 1536ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1537ce8d5614SIntel RTE_PORT_HANDLING, 1538ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1539ce8d5614SIntel printf("Port %d can not be set back " 1540ce8d5614SIntel "to stopped\n", pi); 1541ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1542ce8d5614SIntel /* try to reconfigure queues next time */ 1543ce8d5614SIntel port->need_reconfig_queues = 1; 1544148f963fSBruce Richardson return -1; 1545ce8d5614SIntel } 1546ce8d5614SIntel } 154776ad4a2dSGaetan Rivet 154876ad4a2dSGaetan Rivet for (event_type = RTE_ETH_EVENT_UNKNOWN; 154976ad4a2dSGaetan Rivet event_type < RTE_ETH_EVENT_MAX; 155076ad4a2dSGaetan Rivet event_type++) { 155176ad4a2dSGaetan Rivet diag = rte_eth_dev_callback_register(pi, 155276ad4a2dSGaetan Rivet event_type, 155376ad4a2dSGaetan Rivet eth_event_callback, 155476ad4a2dSGaetan Rivet NULL); 155576ad4a2dSGaetan Rivet if (diag) { 155676ad4a2dSGaetan Rivet printf("Failed to setup even callback for event %d\n", 155776ad4a2dSGaetan Rivet event_type); 155876ad4a2dSGaetan Rivet 
return -1; 155976ad4a2dSGaetan Rivet } 156076ad4a2dSGaetan Rivet } 156176ad4a2dSGaetan Rivet 1562ce8d5614SIntel /* start port */ 1563ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1564ce8d5614SIntel printf("Fail to start port %d\n", pi); 1565ce8d5614SIntel 1566ce8d5614SIntel /* Fail to setup rx queue, return */ 1567ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1568ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1569ce8d5614SIntel printf("Port %d can not be set back to " 1570ce8d5614SIntel "stopped\n", pi); 1571ce8d5614SIntel continue; 1572ce8d5614SIntel } 1573ce8d5614SIntel 1574ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1575ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1576ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1577ce8d5614SIntel 15782950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1579d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 15802950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 15812950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 15822950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1583d8c89163SZijie Pan 1584ce8d5614SIntel /* at least one port started, need checking link status */ 1585ce8d5614SIntel need_check_link_status = 1; 1586ce8d5614SIntel } 1587ce8d5614SIntel 158892d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1589edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 159092d2703eSMichael Qiu else if (need_check_link_status == 0) 1591ce8d5614SIntel printf("Please stop the ports first\n"); 1592ce8d5614SIntel 1593ce8d5614SIntel printf("Done\n"); 1594148f963fSBruce Richardson return 0; 1595ce8d5614SIntel } 1596ce8d5614SIntel 1597ce8d5614SIntel void 1598ce8d5614SIntel stop_port(portid_t pid) 1599ce8d5614SIntel { 1600ce8d5614SIntel portid_t pi; 1601ce8d5614SIntel struct rte_port *port; 1602ce8d5614SIntel int need_check_link_status = 0; 1603ce8d5614SIntel 1604ce8d5614SIntel if (dcb_test) { 1605ce8d5614SIntel dcb_test = 0; 1606ce8d5614SIntel dcb_config = 0; 1607ce8d5614SIntel } 16084468635fSMichael Qiu 16094468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 16104468635fSMichael Qiu return; 16114468635fSMichael Qiu 1612ce8d5614SIntel printf("Stopping ports...\n"); 1613ce8d5614SIntel 16147d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 16154468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1616ce8d5614SIntel continue; 1617ce8d5614SIntel 1618a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1619a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1620a8ef3e3aSBernard Iremonger continue; 1621a8ef3e3aSBernard Iremonger } 1622a8ef3e3aSBernard Iremonger 16230e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 16240e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 16250e545d30SBernard Iremonger continue; 16260e545d30SBernard Iremonger } 16270e545d30SBernard Iremonger 1628ce8d5614SIntel port = &ports[pi]; 1629ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1630ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1631ce8d5614SIntel continue; 1632ce8d5614SIntel 1633ce8d5614SIntel rte_eth_dev_stop(pi); 1634ce8d5614SIntel 1635ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1636ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1637ce8d5614SIntel printf("Port %d can not be set into stopped\n", 
pi); 1638ce8d5614SIntel need_check_link_status = 1; 1639ce8d5614SIntel } 1640bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1641edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1642ce8d5614SIntel 1643ce8d5614SIntel printf("Done\n"); 1644ce8d5614SIntel } 1645ce8d5614SIntel 1646ce8d5614SIntel void 1647ce8d5614SIntel close_port(portid_t pid) 1648ce8d5614SIntel { 1649ce8d5614SIntel portid_t pi; 1650ce8d5614SIntel struct rte_port *port; 1651ce8d5614SIntel 16524468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 16534468635fSMichael Qiu return; 16544468635fSMichael Qiu 1655ce8d5614SIntel printf("Closing ports...\n"); 1656ce8d5614SIntel 16577d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 16584468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1659ce8d5614SIntel continue; 1660ce8d5614SIntel 1661a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1662a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1663a8ef3e3aSBernard Iremonger continue; 1664a8ef3e3aSBernard Iremonger } 1665a8ef3e3aSBernard Iremonger 16660e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 16670e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 16680e545d30SBernard Iremonger continue; 16690e545d30SBernard Iremonger } 16700e545d30SBernard Iremonger 1671ce8d5614SIntel port = &ports[pi]; 1672ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1673d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1674d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1675d4e8ad64SMichael Qiu continue; 1676d4e8ad64SMichael Qiu } 1677d4e8ad64SMichael Qiu 1678d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1679ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1680ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1681ce8d5614SIntel continue; 1682ce8d5614SIntel } 1683ce8d5614SIntel 1684938a184aSAdrien Mazarguil if (port->flow_list) 1685938a184aSAdrien Mazarguil port_flow_flush(pi); 1686ce8d5614SIntel rte_eth_dev_close(pi); 1687ce8d5614SIntel 1688ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1689ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1690b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1691ce8d5614SIntel } 1692ce8d5614SIntel 1693ce8d5614SIntel printf("Done\n"); 1694ce8d5614SIntel } 1695ce8d5614SIntel 1696edab33b1STetsuya Mukawa void 169797f1e196SWei Dai reset_port(portid_t pid) 169897f1e196SWei Dai { 169997f1e196SWei Dai int diag; 170097f1e196SWei Dai portid_t pi; 170197f1e196SWei Dai struct rte_port *port; 170297f1e196SWei Dai 170397f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 170497f1e196SWei Dai return; 170597f1e196SWei Dai 170697f1e196SWei Dai printf("Resetting ports...\n"); 170797f1e196SWei Dai 170897f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 170997f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 171097f1e196SWei Dai continue; 171197f1e196SWei Dai 171297f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 171397f1e196SWei Dai printf("Please remove port %d from forwarding " 171497f1e196SWei Dai "configuration.\n", pi); 171597f1e196SWei Dai continue; 171697f1e196SWei Dai } 171797f1e196SWei Dai 171897f1e196SWei Dai if (port_is_bonding_slave(pi)) { 171997f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 172097f1e196SWei Dai pi); 172197f1e196SWei Dai continue; 172297f1e196SWei Dai } 
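/*
 * rte_eth_dev_reset() stops and resets the device; on success the port is
 * flagged for full reconfiguration (device and queues) before its next start.
 */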
172397f1e196SWei Dai 172497f1e196SWei Dai diag = rte_eth_dev_reset(pi); 172597f1e196SWei Dai if (diag == 0) { 172697f1e196SWei Dai port = &ports[pi]; 172797f1e196SWei Dai port->need_reconfig = 1; 172897f1e196SWei Dai port->need_reconfig_queues = 1; 172997f1e196SWei Dai } else { 173097f1e196SWei Dai printf("Failed to reset port %d. diag=%d\n", pi, diag); 173197f1e196SWei Dai } 173297f1e196SWei Dai } 173397f1e196SWei Dai 173497f1e196SWei Dai printf("Done\n"); 173597f1e196SWei Dai } 173697f1e196SWei Dai 173797f1e196SWei Dai void 1738edab33b1STetsuya Mukawa attach_port(char *identifier) 1739ce8d5614SIntel { 1740ebf5e9b7SBernard Iremonger portid_t pi = 0; 1741931126baSBernard Iremonger unsigned int socket_id; 1742ce8d5614SIntel 1743edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1744edab33b1STetsuya Mukawa 1745edab33b1STetsuya Mukawa if (identifier == NULL) { 1746edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1747edab33b1STetsuya Mukawa return; 1748ce8d5614SIntel } 1749ce8d5614SIntel 1750edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1751edab33b1STetsuya Mukawa return; 1752edab33b1STetsuya Mukawa 1753931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1754931126baSBernard Iremonger /* if socket_id is invalid, set to 0 */ 1755931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 1756931126baSBernard Iremonger socket_id = 0; 1757931126baSBernard Iremonger reconfig(pi, socket_id); 1758edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1759edab33b1STetsuya Mukawa 1760edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1761edab33b1STetsuya Mukawa 1762edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1763edab33b1STetsuya Mukawa 1764edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 1765edab33b1STetsuya Mukawa printf("Done\n"); 1766edab33b1STetsuya Mukawa } 1767edab33b1STetsuya Mukawa 1768edab33b1STetsuya Mukawa void 176928caa76aSZhiyong Yang detach_port(portid_t port_id) 17705f4ec54fSChen Jing D(Mark) { 1771edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 17725f4ec54fSChen Jing D(Mark) 1773edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 17745f4ec54fSChen Jing D(Mark) 1775edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1776edab33b1STetsuya Mukawa printf("Please close port first\n"); 1777edab33b1STetsuya Mukawa return; 1778edab33b1STetsuya Mukawa } 1779edab33b1STetsuya Mukawa 1780938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 1781938a184aSAdrien Mazarguil port_flow_flush(port_id); 1782938a184aSAdrien Mazarguil 17833070419eSGaetan Rivet if (rte_eth_dev_detach(port_id, name)) { 1784285fd101SOlivier Matz TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); 1785edab33b1STetsuya Mukawa return; 17863070419eSGaetan Rivet } 1787edab33b1STetsuya Mukawa 1788edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1789edab33b1STetsuya Mukawa 1790edab33b1STetsuya Mukawa printf("Port '%s' is detached. 
Now total ports is %d\n", 1791edab33b1STetsuya Mukawa name, nb_ports); 1792edab33b1STetsuya Mukawa printf("Done\n"); 1793edab33b1STetsuya Mukawa return; 17945f4ec54fSChen Jing D(Mark) } 17955f4ec54fSChen Jing D(Mark) 1796af75078fSIntel void 1797af75078fSIntel pmd_test_exit(void) 1798af75078fSIntel { 1799af75078fSIntel portid_t pt_id; 1800af75078fSIntel 18018210ec25SPablo de Lara if (test_done == 0) 18028210ec25SPablo de Lara stop_packet_forwarding(); 18038210ec25SPablo de Lara 1804d3a274ceSZhihong Wang if (ports != NULL) { 1805d3a274ceSZhihong Wang no_link_check = 1; 18067d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 1807d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1808af75078fSIntel fflush(stdout); 1809d3a274ceSZhihong Wang stop_port(pt_id); 1810d3a274ceSZhihong Wang close_port(pt_id); 1811af75078fSIntel } 1812d3a274ceSZhihong Wang } 1813d3a274ceSZhihong Wang printf("\nBye...\n"); 1814af75078fSIntel } 1815af75078fSIntel 1816af75078fSIntel typedef void (*cmd_func_t)(void); 1817af75078fSIntel struct pmd_test_command { 1818af75078fSIntel const char *cmd_name; 1819af75078fSIntel cmd_func_t cmd_func; 1820af75078fSIntel }; 1821af75078fSIntel 1822af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1823af75078fSIntel 1824ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1825af75078fSIntel static void 1826edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 1827af75078fSIntel { 1828ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 1829ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 1830f8244c63SZhiyong Yang portid_t portid; 1831f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 1832ce8d5614SIntel struct rte_eth_link link; 1833ce8d5614SIntel 1834ce8d5614SIntel printf("Checking link statuses...\n"); 1835ce8d5614SIntel fflush(stdout); 1836ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 1837ce8d5614SIntel all_ports_up = 1; 18387d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 1839ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 1840ce8d5614SIntel continue; 1841ce8d5614SIntel memset(&link, 0, sizeof(link)); 1842ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 1843ce8d5614SIntel /* print link status if flag set */ 1844ce8d5614SIntel if (print_flag == 1) { 1845ce8d5614SIntel if (link.link_status) 1846f8244c63SZhiyong Yang printf( 1847f8244c63SZhiyong Yang "Port%d Link Up. speed %u Mbps- %s\n", 1848f8244c63SZhiyong Yang portid, link.link_speed, 1849ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
1850ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 1851ce8d5614SIntel else 1852f8244c63SZhiyong Yang printf("Port %d Link Down\n", portid); 1853ce8d5614SIntel continue; 1854ce8d5614SIntel } 1855ce8d5614SIntel /* clear all_ports_up flag if any link down */ 185609419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 1857ce8d5614SIntel all_ports_up = 0; 1858ce8d5614SIntel break; 1859ce8d5614SIntel } 1860ce8d5614SIntel } 1861ce8d5614SIntel /* after finally printing all link status, get out */ 1862ce8d5614SIntel if (print_flag == 1) 1863ce8d5614SIntel break; 1864ce8d5614SIntel 1865ce8d5614SIntel if (all_ports_up == 0) { 1866ce8d5614SIntel fflush(stdout); 1867ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 1868ce8d5614SIntel } 1869ce8d5614SIntel 1870ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 1871ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1872ce8d5614SIntel print_flag = 1; 1873ce8d5614SIntel } 18748ea656f8SGaetan Rivet 18758ea656f8SGaetan Rivet if (lsc_interrupt) 18768ea656f8SGaetan Rivet break; 1877ce8d5614SIntel } 1878af75078fSIntel } 1879af75078fSIntel 1880284c908cSGaetan Rivet static void 1881284c908cSGaetan Rivet rmv_event_callback(void *arg) 1882284c908cSGaetan Rivet { 1883284c908cSGaetan Rivet struct rte_eth_dev *dev; 188428caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 1885284c908cSGaetan Rivet 1886284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 1887284c908cSGaetan Rivet dev = &rte_eth_devices[port_id]; 1888284c908cSGaetan Rivet 1889284c908cSGaetan Rivet stop_port(port_id); 1890284c908cSGaetan Rivet close_port(port_id); 1891f3a1188cSGaetan Rivet printf("removing device %s\n", dev->device->name); 18923070419eSGaetan Rivet if (rte_eal_dev_detach(dev->device)) 1893285fd101SOlivier Matz TESTPMD_LOG(ERR, "Failed to detach device %s\n", 18943070419eSGaetan Rivet dev->device->name); 1895284c908cSGaetan Rivet } 1896284c908cSGaetan Rivet 189776ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 1898d6af1a13SBernard Iremonger static int 1899f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 1900d6af1a13SBernard Iremonger void *ret_param) 190176ad4a2dSGaetan Rivet { 190276ad4a2dSGaetan Rivet static const char * const event_desc[] = { 190376ad4a2dSGaetan Rivet [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 190476ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_LSC] = "LSC", 190576ad4a2dSGaetan Rivet [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 190676ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 190776ad4a2dSGaetan Rivet [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 190876ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MACSEC] = "MACsec", 190976ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 191076ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 191176ad4a2dSGaetan Rivet }; 191276ad4a2dSGaetan Rivet 191376ad4a2dSGaetan Rivet RTE_SET_USED(param); 1914d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 191576ad4a2dSGaetan Rivet 191676ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 191776ad4a2dSGaetan Rivet fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 191876ad4a2dSGaetan Rivet port_id, __func__, type); 191976ad4a2dSGaetan Rivet fflush(stderr); 19203af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 192176ad4a2dSGaetan Rivet printf("\nPort %" PRIu8 ": %s event\n", port_id, 192276ad4a2dSGaetan Rivet event_desc[type]); 192376ad4a2dSGaetan Rivet fflush(stdout); 192476ad4a2dSGaetan Rivet } 
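/*
 * Only device-removal (RMV) events need extra handling here: the actual
 * detach is deferred to an EAL alarm callback so that this interrupt-thread
 * callback returns promptly.
 */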
1925284c908cSGaetan Rivet 1926284c908cSGaetan Rivet switch (type) { 1927284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 1928284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 1929284c908cSGaetan Rivet rmv_event_callback, (void *)(intptr_t)port_id)) 1930284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 1931284c908cSGaetan Rivet break; 1932284c908cSGaetan Rivet default: 1933284c908cSGaetan Rivet break; 1934284c908cSGaetan Rivet } 1935d6af1a13SBernard Iremonger return 0; 193676ad4a2dSGaetan Rivet } 193776ad4a2dSGaetan Rivet 1938013af9b6SIntel static int 193928caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1940af75078fSIntel { 1941013af9b6SIntel uint16_t i; 1942af75078fSIntel int diag; 1943013af9b6SIntel uint8_t mapping_found = 0; 1944af75078fSIntel 1945013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 1946013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 1947013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 1948013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 1949013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 1950013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 1951013af9b6SIntel if (diag != 0) 1952013af9b6SIntel return diag; 1953013af9b6SIntel mapping_found = 1; 1954af75078fSIntel } 1955013af9b6SIntel } 1956013af9b6SIntel if (mapping_found) 1957013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 1958013af9b6SIntel return 0; 1959013af9b6SIntel } 1960013af9b6SIntel 1961013af9b6SIntel static int 196228caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 1963013af9b6SIntel { 1964013af9b6SIntel uint16_t i; 1965013af9b6SIntel int diag; 1966013af9b6SIntel uint8_t mapping_found = 0; 1967013af9b6SIntel 1968013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 1969013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 1970013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 1971013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1972013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 1973013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 1974013af9b6SIntel if (diag != 0) 1975013af9b6SIntel return diag; 1976013af9b6SIntel mapping_found = 1; 1977013af9b6SIntel } 1978013af9b6SIntel } 1979013af9b6SIntel if (mapping_found) 1980013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 1981013af9b6SIntel return 0; 1982013af9b6SIntel } 1983013af9b6SIntel 1984013af9b6SIntel static void 198528caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 1986013af9b6SIntel { 1987013af9b6SIntel int diag = 0; 1988013af9b6SIntel 1989013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 1990af75078fSIntel if (diag != 0) { 1991013af9b6SIntel if (diag == -ENOTSUP) { 1992013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 1993013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 1994013af9b6SIntel } 1995013af9b6SIntel else 1996013af9b6SIntel rte_exit(EXIT_FAILURE, 1997013af9b6SIntel "set_tx_queue_stats_mapping_registers " 1998013af9b6SIntel "failed for port id=%d diag=%d\n", 1999af75078fSIntel pi, diag); 2000af75078fSIntel } 2001013af9b6SIntel 2002013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 2003af75078fSIntel if (diag != 0) { 2004013af9b6SIntel if (diag == -ENOTSUP) { 2005013af9b6SIntel port->rx_queue_stats_mapping_enabled 
= 0; 2006013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 2007013af9b6SIntel } 2008013af9b6SIntel else 2009013af9b6SIntel rte_exit(EXIT_FAILURE, 2010013af9b6SIntel "set_rx_queue_stats_mapping_registers " 2011013af9b6SIntel "failed for port id=%d diag=%d\n", 2012af75078fSIntel pi, diag); 2013af75078fSIntel } 2014af75078fSIntel } 2015af75078fSIntel 2016f2c5125aSPablo de Lara static void 2017f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 2018f2c5125aSPablo de Lara { 2019f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 2020f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 2021f2c5125aSPablo de Lara 2022f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 2023f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2024f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2025f2c5125aSPablo de Lara 2026f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2027f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2028f2c5125aSPablo de Lara 2029f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2030f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2031f2c5125aSPablo de Lara 2032f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2033f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 2034f2c5125aSPablo de Lara 2035f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2036f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 2037f2c5125aSPablo de Lara 2038f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2039f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2040f2c5125aSPablo de Lara 2041f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2042f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2043f2c5125aSPablo de Lara 2044f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2045f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2046f2c5125aSPablo de Lara 2047f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2048f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2049f2c5125aSPablo de Lara 2050f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2051f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 2052f2c5125aSPablo de Lara 2053f2c5125aSPablo de Lara if (txq_flags != RTE_PMD_PARAM_UNSET) 2054f2c5125aSPablo de Lara port->tx_conf.txq_flags = txq_flags; 2055f2c5125aSPablo de Lara } 2056f2c5125aSPablo de Lara 2057013af9b6SIntel void 2058013af9b6SIntel init_port_config(void) 2059013af9b6SIntel { 2060013af9b6SIntel portid_t pid; 2061013af9b6SIntel struct rte_port *port; 2062013af9b6SIntel 20637d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2064013af9b6SIntel port = &ports[pid]; 2065013af9b6SIntel port->dev_conf.rxmode = rx_mode; 2066013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 20673ce690d3SBruce Richardson if (nb_rxq > 1) { 2068013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2069013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2070af75078fSIntel } else { 2071013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2072013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2073af75078fSIntel } 20743ce690d3SBruce Richardson 20755f592039SJingjing Wu if (port->dcb_flag == 0) { 20763ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 
20773ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 20783ce690d3SBruce Richardson else 20793ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 20803ce690d3SBruce Richardson } 20813ce690d3SBruce Richardson 2082f2c5125aSPablo de Lara rxtx_port_config(port); 2083013af9b6SIntel 2084013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2085013af9b6SIntel 2086013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 208750c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2088e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 20897b7e5ba7SIntel #endif 20908ea656f8SGaetan Rivet 20918ea656f8SGaetan Rivet if (lsc_interrupt && 20928ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 20938ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 20948ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2095284c908cSGaetan Rivet if (rmv_interrupt && 2096284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2097284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2098284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 20995b590fbeSJasvinder Singh 21005b590fbeSJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 21015b590fbeSJasvinder Singh /* Detect softnic port */ 21025b590fbeSJasvinder Singh if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 21035b590fbeSJasvinder Singh port->softnic_enable = 1; 21045b590fbeSJasvinder Singh memset(&port->softport, 0, sizeof(struct softnic_port)); 21055b590fbeSJasvinder Singh 21065b590fbeSJasvinder Singh if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 21075b590fbeSJasvinder Singh port->softport.tm_flag = 1; 21085b590fbeSJasvinder Singh } 21095b590fbeSJasvinder Singh #endif 2110013af9b6SIntel } 2111013af9b6SIntel } 2112013af9b6SIntel 211341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 211441b05095SBernard Iremonger { 211541b05095SBernard Iremonger struct rte_port *port; 211641b05095SBernard Iremonger 211741b05095SBernard Iremonger port = &ports[slave_pid]; 211841b05095SBernard Iremonger port->slave_flag = 1; 211941b05095SBernard Iremonger } 212041b05095SBernard Iremonger 212141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 212241b05095SBernard Iremonger { 212341b05095SBernard Iremonger struct rte_port *port; 212441b05095SBernard Iremonger 212541b05095SBernard Iremonger port = &ports[slave_pid]; 212641b05095SBernard Iremonger port->slave_flag = 0; 212741b05095SBernard Iremonger } 212841b05095SBernard Iremonger 21290e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 21300e545d30SBernard Iremonger { 21310e545d30SBernard Iremonger struct rte_port *port; 21320e545d30SBernard Iremonger 21330e545d30SBernard Iremonger port = &ports[slave_pid]; 21340e545d30SBernard Iremonger return port->slave_flag; 21350e545d30SBernard Iremonger } 21360e545d30SBernard Iremonger 2137013af9b6SIntel const uint16_t vlan_tags[] = { 2138013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2139013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2140013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2141013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 2142013af9b6SIntel }; 2143013af9b6SIntel 2144013af9b6SIntel static int 21451a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 21461a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 21471a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 21481a572499SJingjing Wu uint8_t pfc_en) 2149013af9b6SIntel { 2150013af9b6SIntel uint8_t i; 2151af75078fSIntel 
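/*
 * Translate the requested DCB mode, number of traffic classes and PFC setting
 * into a complete rte_eth_conf: either VMDQ+DCB (described below) or plain
 * DCB with RSS enabled on the RX side.
 */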
2152af75078fSIntel /* 2153013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2154013af9b6SIntel * given above, and the number of traffic classes available for use. 2155af75078fSIntel */ 21561a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 21571a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 21581a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 21591a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 21601a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2161013af9b6SIntel 2162547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 21631a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 21641a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 21651a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 21661a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 21671a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 21681a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2169013af9b6SIntel 21701a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 21711a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 21721a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 21731a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 21741a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2175af75078fSIntel } 2176013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2177f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2178f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2179013af9b6SIntel } 2180013af9b6SIntel 2181013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 218232e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 218332e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 21841a572499SJingjing Wu } else { 21851a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 21861a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 21871a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 21881a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2189013af9b6SIntel 21901a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 21911a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 21921a572499SJingjing Wu 2193bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2194bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2195bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2196013af9b6SIntel } 21971a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 21981a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 219932e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 22001a572499SJingjing Wu } 22011a572499SJingjing Wu 22021a572499SJingjing Wu if (pfc_en) 22031a572499SJingjing Wu eth_conf->dcb_capability_en = 22041a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2205013af9b6SIntel else 2206013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2207013af9b6SIntel 2208013af9b6SIntel return 0; 2209013af9b6SIntel } 2210013af9b6SIntel 2211013af9b6SIntel int 22121a572499SJingjing Wu init_port_dcb_config(portid_t pid, 22131a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 22141a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 22151a572499SJingjing Wu uint8_t pfc_en) 2216013af9b6SIntel { 2217013af9b6SIntel struct rte_eth_conf port_conf; 2218013af9b6SIntel struct rte_port *rte_port; 2219013af9b6SIntel int retval; 2220013af9b6SIntel uint16_t i; 2221013af9b6SIntel 22222a977b89SWenzhuo Lu 
rte_port = &ports[pid]; 2223013af9b6SIntel 2224013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2225013af9b6SIntel /* Enter DCB configuration status */ 2226013af9b6SIntel dcb_config = 1; 2227013af9b6SIntel 2228013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 22291a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2230013af9b6SIntel if (retval < 0) 2231013af9b6SIntel return retval; 22320074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2233013af9b6SIntel 22342a977b89SWenzhuo Lu /** 22352a977b89SWenzhuo Lu * Write the configuration into the device. 22362a977b89SWenzhuo Lu * Set the numbers of RX & TX queues to 0, so 22372a977b89SWenzhuo Lu * the RX & TX queues will not be setup. 22382a977b89SWenzhuo Lu */ 2239c947ef89SStephen Hemminger rte_eth_dev_configure(pid, 0, 0, &port_conf); 22402a977b89SWenzhuo Lu 22412a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 22422a977b89SWenzhuo Lu 22432a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 22442a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 22452a977b89SWenzhuo Lu */ 22462a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 22472a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 22482a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 22492a977b89SWenzhuo Lu " for port %d.", pid); 22502a977b89SWenzhuo Lu return -1; 22512a977b89SWenzhuo Lu } 22522a977b89SWenzhuo Lu 22532a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 22542a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 22552a977b89SWenzhuo Lu */ 22562a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 225786ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 225886ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 225986ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 226086ef65eeSBernard Iremonger } else { 22612a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 22622a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 226386ef65eeSBernard Iremonger } 22642a977b89SWenzhuo Lu } else { 22652a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 22662a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 22672a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 22682a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 22692a977b89SWenzhuo Lu } else { 22702a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 22712a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 22722a977b89SWenzhuo Lu 22732a977b89SWenzhuo Lu } 22742a977b89SWenzhuo Lu } 22752a977b89SWenzhuo Lu rx_free_thresh = 64; 22762a977b89SWenzhuo Lu 2277013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2278013af9b6SIntel 2279f2c5125aSPablo de Lara rxtx_port_config(rte_port); 2280013af9b6SIntel /* VLAN filter */ 22810074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 22821a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 2283013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 2284013af9b6SIntel 2285013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2286013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 2287013af9b6SIntel 22887741e4cfSIntel rte_port->dcb_flag = 1; 22897741e4cfSIntel 2290013af9b6SIntel return 0; 2291af75078fSIntel } 2292af75078fSIntel 2293ffc468ffSTetsuya Mukawa static void 
2294ffc468ffSTetsuya Mukawa init_port(void) 2295ffc468ffSTetsuya Mukawa { 2296ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. */ 2297ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 2298ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2299ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 2300ffc468ffSTetsuya Mukawa if (ports == NULL) { 2301ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2302ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2303ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2304ffc468ffSTetsuya Mukawa } 2305ffc468ffSTetsuya Mukawa } 2306ffc468ffSTetsuya Mukawa 2307d3a274ceSZhihong Wang static void 2308d3a274ceSZhihong Wang force_quit(void) 2309d3a274ceSZhihong Wang { 2310d3a274ceSZhihong Wang pmd_test_exit(); 2311d3a274ceSZhihong Wang prompt_exit(); 2312d3a274ceSZhihong Wang } 2313d3a274ceSZhihong Wang 2314d3a274ceSZhihong Wang static void 2315cfea1f30SPablo de Lara print_stats(void) 2316cfea1f30SPablo de Lara { 2317cfea1f30SPablo de Lara uint8_t i; 2318cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 2319cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2320cfea1f30SPablo de Lara 2321cfea1f30SPablo de Lara /* Clear screen and move to top left */ 2322cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 2323cfea1f30SPablo de Lara 2324cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 2325cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2326cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 2327cfea1f30SPablo de Lara } 2328cfea1f30SPablo de Lara 2329cfea1f30SPablo de Lara static void 2330d3a274ceSZhihong Wang signal_handler(int signum) 2331d3a274ceSZhihong Wang { 2332d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 2333d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 2334d3a274ceSZhihong Wang signum); 2335102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 2336102b7329SReshma Pattan /* uninitialize packet capture framework */ 2337102b7329SReshma Pattan rte_pdump_uninit(); 2338102b7329SReshma Pattan #endif 233962d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 234062d3216dSReshma Pattan rte_latencystats_uninit(); 234162d3216dSReshma Pattan #endif 2342d3a274ceSZhihong Wang force_quit(); 2343d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
*/ 2344d9a191a0SPhil Yang f_quit = 1; 2345d3a274ceSZhihong Wang /* exit with the expected status */ 2346d3a274ceSZhihong Wang signal(signum, SIG_DFL); 2347d3a274ceSZhihong Wang kill(getpid(), signum); 2348d3a274ceSZhihong Wang } 2349d3a274ceSZhihong Wang } 2350d3a274ceSZhihong Wang 2351af75078fSIntel int 2352af75078fSIntel main(int argc, char** argv) 2353af75078fSIntel { 2354af75078fSIntel int diag; 2355f8244c63SZhiyong Yang portid_t port_id; 2356af75078fSIntel 2357d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 2358d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 2359d3a274ceSZhihong Wang 2360af75078fSIntel diag = rte_eal_init(argc, argv); 2361af75078fSIntel if (diag < 0) 2362af75078fSIntel rte_panic("Cannot init EAL\n"); 2363af75078fSIntel 2364285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 2365285fd101SOlivier Matz if (testpmd_logtype < 0) 2366285fd101SOlivier Matz rte_panic("Cannot register log type"); 2367285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 2368285fd101SOlivier Matz 23691c036b16SEelco Chaudron if (mlockall(MCL_CURRENT | MCL_FUTURE)) { 2370285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 23711c036b16SEelco Chaudron strerror(errno)); 23721c036b16SEelco Chaudron } 23731c036b16SEelco Chaudron 2374102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 2375102b7329SReshma Pattan /* initialize packet capture framework */ 2376102b7329SReshma Pattan rte_pdump_init(NULL); 2377102b7329SReshma Pattan #endif 2378102b7329SReshma Pattan 2379af75078fSIntel nb_ports = (portid_t) rte_eth_dev_count(); 2380af75078fSIntel if (nb_ports == 0) 2381285fd101SOlivier Matz TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 2382af75078fSIntel 2383ffc468ffSTetsuya Mukawa /* allocate port structures, and init them */ 2384ffc468ffSTetsuya Mukawa init_port(); 2385ffc468ffSTetsuya Mukawa 2386af75078fSIntel set_def_fwd_config(); 2387af75078fSIntel if (nb_lcores == 0) 2388af75078fSIntel rte_panic("Empty set of forwarding logical cores - check the " 2389af75078fSIntel "core mask supplied in the command parameters\n"); 2390af75078fSIntel 239165eb1e54SPablo de Lara /* Bitrate/latency stats disabled by default */ 239230bcc68cSPablo de Lara #ifdef RTE_LIBRTE_BITRATE 2393e25e6c70SRemy Horton bitrate_enabled = 0; 239430bcc68cSPablo de Lara #endif 239565eb1e54SPablo de Lara #ifdef RTE_LIBRTE_LATENCY_STATS 239665eb1e54SPablo de Lara latencystats_enabled = 0; 239765eb1e54SPablo de Lara #endif 2398e25e6c70SRemy Horton 2399af75078fSIntel argc -= diag; 2400af75078fSIntel argv += diag; 2401af75078fSIntel if (argc > 1) 2402af75078fSIntel launch_args_parse(argc, argv); 2403af75078fSIntel 240499cabef0SPablo de Lara if (tx_first && interactive) 240599cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 240699cabef0SPablo de Lara "interactive mode.\n"); 24078820cba4SDavid Hunt 24088820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 24098820cba4SDavid Hunt printf("Warning: lsc_interrupt needs to be off when " 24108820cba4SDavid Hunt " using tx_first. 
Disabling.\n"); 24118820cba4SDavid Hunt lsc_interrupt = 0; 24128820cba4SDavid Hunt } 24138820cba4SDavid Hunt 24145a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 24155a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 24165a8fb55cSReshma Pattan 24175a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 2418af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 2419af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 2420af75078fSIntel nb_rxq, nb_txq); 2421af75078fSIntel 2422af75078fSIntel init_config(); 2423148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 2424148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 2425af75078fSIntel 2426ce8d5614SIntel /* set all ports to promiscuous mode by default */ 24277d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(port_id) 2428ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 2429af75078fSIntel 24307e4441c8SRemy Horton /* Init metrics library */ 24317e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 24327e4441c8SRemy Horton 243362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 243462d3216dSReshma Pattan if (latencystats_enabled != 0) { 243562d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 243662d3216dSReshma Pattan if (ret) 243762d3216dSReshma Pattan printf("Warning: latencystats init()" 243862d3216dSReshma Pattan " returned error %d\n", ret); 243962d3216dSReshma Pattan printf("Latencystats running on lcore %d\n", 244062d3216dSReshma Pattan latencystats_lcore_id); 244162d3216dSReshma Pattan } 244262d3216dSReshma Pattan #endif 244362d3216dSReshma Pattan 24447e4441c8SRemy Horton /* Setup bitrate stats */ 24457e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 2446e25e6c70SRemy Horton if (bitrate_enabled != 0) { 24477e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 24487e4441c8SRemy Horton if (bitrate_data == NULL) 2449e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 2450e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 24517e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 2452e25e6c70SRemy Horton } 24537e4441c8SRemy Horton #endif 24547e4441c8SRemy Horton 24550d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 245681ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 245781ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 245881ef862bSAllain Legacy 2459ca7feb22SCyril Chemparathy if (interactive == 1) { 2460ca7feb22SCyril Chemparathy if (auto_start) { 2461ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 2462ca7feb22SCyril Chemparathy start_packet_forwarding(0); 2463ca7feb22SCyril Chemparathy } 2464af75078fSIntel prompt(); 24650de738cfSJiayu Hu pmd_test_exit(); 2466ca7feb22SCyril Chemparathy } else 24670d56cb81SThomas Monjalon #endif 24680d56cb81SThomas Monjalon { 2469af75078fSIntel char c; 2470af75078fSIntel int rc; 2471af75078fSIntel 2472d9a191a0SPhil Yang f_quit = 0; 2473d9a191a0SPhil Yang 2474af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 247599cabef0SPablo de Lara start_packet_forwarding(tx_first); 2476cfea1f30SPablo de Lara if (stats_period != 0) { 2477cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 2478cfea1f30SPablo de Lara uint64_t timer_period; 2479cfea1f30SPablo de Lara 2480cfea1f30SPablo de Lara /* Convert to number of cycles */ 2481cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 2482cfea1f30SPablo de Lara 2483d9a191a0SPhil Yang while (f_quit == 0) { 2484cfea1f30SPablo de Lara cur_time = 
rte_get_timer_cycles(); 2485cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 2486cfea1f30SPablo de Lara 2487cfea1f30SPablo de Lara if (diff_time >= timer_period) { 2488cfea1f30SPablo de Lara print_stats(); 2489cfea1f30SPablo de Lara /* Reset the timer */ 2490cfea1f30SPablo de Lara diff_time = 0; 2491cfea1f30SPablo de Lara } 2492cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 2493cfea1f30SPablo de Lara prev_time = cur_time; 2494cfea1f30SPablo de Lara sleep(1); 2495cfea1f30SPablo de Lara } 2496cfea1f30SPablo de Lara } 2497cfea1f30SPablo de Lara 2498af75078fSIntel printf("Press enter to exit\n"); 2499af75078fSIntel rc = read(0, &c, 1); 2500d3a274ceSZhihong Wang pmd_test_exit(); 2501af75078fSIntel if (rc < 0) 2502af75078fSIntel return 1; 2503af75078fSIntel } 2504af75078fSIntel 2505af75078fSIntel return 0; 2506af75078fSIntel } 2507
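/*
 * Illustrative invocations only; the exact EAL core/memory options depend on
 * the target system, and the option spellings below are the usual testpmd
 * parameters corresponding to the interactive, tx_first, nb_rxq/nb_txq and
 * stats_period settings parsed above (assumed, not taken from this file):
 *
 *   testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *
 * runs on lcores 0-3 in interactive mode with two RX and two TX queues per
 * port, while
 *
 *   testpmd -l 0-3 -n 4 -- --tx-first --stats-period=1
 *
 * runs non-interactively, sends an initial burst before forwarding and prints
 * port statistics every second (see the stats_period loop in main() above).
 * Note that --tx-first and interactive mode are mutually exclusive, as
 * enforced in main().
 */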