/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
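/*
 * Illustrative sketch of how the NUMA setting above is typically consumed:
 * pick the mbuf pool created on the socket of a given port, falling back to
 * socket 0 when the socket cannot be determined. mbuf_pool_find() is the
 * helper declared in testpmd.h; the fallback policy shown here is an
 * assumption for illustration only.
 *
 *	static struct rte_mempool *
 *	port_mbuf_pool(portid_t pid)
 *	{
 *		int sid = numa_support ? rte_eth_dev_socket_id(pid) : 0;
 *
 *		if (sid == SOCKET_ID_ANY)
 *			sid = 0;
 *		return mbuf_pool_find((unsigned int)sid);
 *	}
 */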

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports; /**< For all probed ethernet ports. */
portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
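/*
 * Worked example of the sizing above (illustrative): with nb_ports = 2 and
 * nb_rxq = 4, nb_fwd_streams ends up as 2 * 4 = 8, i.e. one forwarding
 * stream per (port, RX queue) pair; more precisely, init_fwd_streams()
 * later in this file sizes it as nb_ports * RTE_MAX(nb_rxq, nb_txq).
 */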

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, a process started with the 'stats-period'
 * option cannot otherwise be terminated. Set this flag to exit the stats
 * period loop once SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
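/*
 * Example (illustrative; command syntax as documented in the testpmd user
 * guide): the txonly segment layout above can be changed at run time from
 * the interactive prompt, e.g.
 *
 *	testpmd> set txpkts 64,64
 *
 * which results in tx_pkt_nb_segs = 2 and tx_pkt_seg_lengths = {64, 64},
 * so each generated 128-byte packet is spread over two mbuf segments.
 */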

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether the DCB test is in progress. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */
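/*
 * Illustrative sketch of how the rss_hf default above reaches the hardware:
 * it is copied into the per-port rte_eth_conf before rte_eth_dev_configure()
 * is called. The masking against dev_info.flow_type_rss_offloads is an
 * assumption of what a driver-safe setup could look like, not necessarily
 * what this file does verbatim.
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(pid, &info);
 *	ports[pid].dev_conf.rx_adv_conf.rss_conf.rss_hf =
 *		rss_hf & info.flow_type_rss_offloads;
 */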

/*
 * Display or mask Ethernet device events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide whether all memory should be locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length.
*/ 3368b9bd0efSMoti Haimovsky .offloads = DEV_RX_OFFLOAD_CRC_STRIP, 3370074d02fSShahaf Shuler .ignore_offload_bitfield = 1, 338af75078fSIntel }; 339af75078fSIntel 34007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = { 34107e5f7bdSShahaf Shuler .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 34207e5f7bdSShahaf Shuler }; 343fd8c20aaSShahaf Shuler 344af75078fSIntel struct rte_fdir_conf fdir_conf = { 345af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 346af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 347af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 348d9d5e6f2SJingjing Wu .mask = { 349d9d5e6f2SJingjing Wu .vlan_tci_mask = 0x0, 350d9d5e6f2SJingjing Wu .ipv4_mask = { 351d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF, 352d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF, 353d9d5e6f2SJingjing Wu }, 354d9d5e6f2SJingjing Wu .ipv6_mask = { 355d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 356d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 357d9d5e6f2SJingjing Wu }, 358d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF, 359d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF, 36047b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF, 36147b3ac6bSWenzhuo Lu .tunnel_type_mask = 1, 36247b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF, 363d9d5e6f2SJingjing Wu }, 364af75078fSIntel .drop_queue = 127, 365af75078fSIntel }; 366af75078fSIntel 3672950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 368af75078fSIntel 369ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 370ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 371ed30d9b6SIntel 372ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 373ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 374ed30d9b6SIntel 375ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 376ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 377ed30d9b6SIntel 378a4fd5eeeSElza Mathew /* 379a4fd5eeeSElza Mathew * Display zero values by default for xstats 380a4fd5eeeSElza Mathew */ 381a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 382a4fd5eeeSElza Mathew 383c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 384c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 3857acf894dSStephen Hurd 386e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE 3877e4441c8SRemy Horton /* Bitrate statistics */ 3887e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 389e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 390e25e6c70SRemy Horton uint8_t bitrate_enabled; 391e25e6c70SRemy Horton #endif 3927e4441c8SRemy Horton 393b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 394b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 395b40f8d78SJiayu Hu 396ed30d9b6SIntel /* Forward function declarations */ 39728caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 39828caa76aSZhiyong Yang struct rte_port *port); 399edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 400f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 40176ad4a2dSGaetan Rivet enum rte_eth_event_type type, 402d6af1a13SBernard Iremonger void *param, void *ret_param); 403fb73e096SJeff Guo static void eth_dev_event_callback(char *device_name, 404fb73e096SJeff Guo enum rte_dev_event_type type, 405fb73e096SJeff Guo void *param); 
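/*
 * Illustrative sketch for the queue-stats mapping tables declared above:
 * each entry is pushed to the NIC through the generic ethdev mapping call.
 * The entry field names (port_id, queue_id, stats_counter_id) are assumed
 * from struct queue_stats_mappings in testpmd.h.
 *
 *	struct queue_stats_mappings *m = &tx_queue_stats_mappings[i];
 *
 *	rte_eth_dev_set_tx_queue_stats_mapping(m->port_id, m->queue_id,
 *					       m->stats_counter_id);
 */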
406fb73e096SJeff Guo static int eth_dev_event_callback_register(void); 407fb73e096SJeff Guo static int eth_dev_event_callback_unregister(void); 408fb73e096SJeff Guo 409ce8d5614SIntel 410ce8d5614SIntel /* 411ce8d5614SIntel * Check if all the ports are started. 412ce8d5614SIntel * If yes, return positive value. If not, return zero. 413ce8d5614SIntel */ 414ce8d5614SIntel static int all_ports_started(void); 415ed30d9b6SIntel 41652f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 41752f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 41852f38a20SJiayu Hu 419af75078fSIntel /* 42098a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 421c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 422c9cafcc8SShahaf Shuler */ 423c9cafcc8SShahaf Shuler int 424c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 425c9cafcc8SShahaf Shuler { 426c9cafcc8SShahaf Shuler unsigned int i; 427c9cafcc8SShahaf Shuler 428c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 429c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 430c9cafcc8SShahaf Shuler return 0; 431c9cafcc8SShahaf Shuler } 432c9cafcc8SShahaf Shuler return 1; 433c9cafcc8SShahaf Shuler } 434c9cafcc8SShahaf Shuler 435c9cafcc8SShahaf Shuler /* 436af75078fSIntel * Setup default configuration. 437af75078fSIntel */ 438af75078fSIntel static void 439af75078fSIntel set_default_fwd_lcores_config(void) 440af75078fSIntel { 441af75078fSIntel unsigned int i; 442af75078fSIntel unsigned int nb_lc; 4437acf894dSStephen Hurd unsigned int sock_num; 444af75078fSIntel 445af75078fSIntel nb_lc = 0; 446af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 447c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 448c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 449c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 450c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 451c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 452c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 453c9cafcc8SShahaf Shuler } 454c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 4557acf894dSStephen Hurd } 456f54fe5eeSStephen Hurd if (!rte_lcore_is_enabled(i)) 457f54fe5eeSStephen Hurd continue; 458f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 459f54fe5eeSStephen Hurd continue; 460f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 461af75078fSIntel } 462af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 463af75078fSIntel nb_cfg_lcores = nb_lcores; 464af75078fSIntel nb_fwd_lcores = 1; 465af75078fSIntel } 466af75078fSIntel 467af75078fSIntel static void 468af75078fSIntel set_def_peer_eth_addrs(void) 469af75078fSIntel { 470af75078fSIntel portid_t i; 471af75078fSIntel 472af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 473af75078fSIntel peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 474af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 475af75078fSIntel } 476af75078fSIntel } 477af75078fSIntel 478af75078fSIntel static void 479af75078fSIntel set_default_fwd_ports_config(void) 480af75078fSIntel { 481af75078fSIntel portid_t pt_id; 48265a7360cSMatan Azrad int i = 0; 483af75078fSIntel 48465a7360cSMatan Azrad RTE_ETH_FOREACH_DEV(pt_id) 48565a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 486af75078fSIntel 487af75078fSIntel nb_cfg_ports = nb_ports; 488af75078fSIntel nb_fwd_ports = nb_ports; 489af75078fSIntel } 490af75078fSIntel 491af75078fSIntel void 492af75078fSIntel set_def_fwd_config(void) 493af75078fSIntel { 494af75078fSIntel 
set_default_fwd_lcores_config(); 495af75078fSIntel set_def_peer_eth_addrs(); 496af75078fSIntel set_default_fwd_ports_config(); 497af75078fSIntel } 498af75078fSIntel 499af75078fSIntel /* 500af75078fSIntel * Configuration initialisation done once at init time. 501af75078fSIntel */ 502af75078fSIntel static void 503af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 504af75078fSIntel unsigned int socket_id) 505af75078fSIntel { 506af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 507bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 508af75078fSIntel uint32_t mb_size; 509af75078fSIntel 510dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 511af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 512148f963fSBruce Richardson 513285fd101SOlivier Matz TESTPMD_LOG(INFO, 514d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 515d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 516d1eb542eSOlivier Matz 517b19a0c75SOlivier Matz if (mp_anon != 0) { 518b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 519bece7b6cSChristian Ehrhardt mb_size, (unsigned) mb_mempool_cache, 520148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 521148f963fSBruce Richardson socket_id, 0); 52224427bb9SOlivier Matz if (rte_mp == NULL) 52324427bb9SOlivier Matz goto err; 524b19a0c75SOlivier Matz 525b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 526b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 527b19a0c75SOlivier Matz rte_mp = NULL; 52824427bb9SOlivier Matz goto err; 529b19a0c75SOlivier Matz } 530b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 531b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 532b19a0c75SOlivier Matz } else { 533ea0c20eaSOlivier Matz /* wrapper to rte_mempool_create() */ 5340e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 5350e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 536ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 537ea0c20eaSOlivier Matz mb_mempool_cache, 0, mbuf_seg_size, socket_id); 538bece7b6cSChristian Ehrhardt } 539148f963fSBruce Richardson 54024427bb9SOlivier Matz err: 541af75078fSIntel if (rte_mp == NULL) { 542d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 543d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 544d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 545148f963fSBruce Richardson } else if (verbose_level > 0) { 546591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 547af75078fSIntel } 548af75078fSIntel } 549af75078fSIntel 55020a0286fSLiu Xiaofeng /* 55120a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 55220a0286fSLiu Xiaofeng * if valid, return 0, else return -1 55320a0286fSLiu Xiaofeng */ 55420a0286fSLiu Xiaofeng static int 55520a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 55620a0286fSLiu Xiaofeng { 55720a0286fSLiu Xiaofeng static int warning_once = 0; 55820a0286fSLiu Xiaofeng 559c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 56020a0286fSLiu Xiaofeng if (!warning_once && numa_support) 56120a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 56220a0286fSLiu Xiaofeng " using --port-numa-config and" 56320a0286fSLiu Xiaofeng " --ring-numa-config parameters along with" 56420a0286fSLiu Xiaofeng " --numa.\n"); 56520a0286fSLiu Xiaofeng warning_once = 1; 56620a0286fSLiu Xiaofeng 
return -1; 56720a0286fSLiu Xiaofeng } 56820a0286fSLiu Xiaofeng return 0; 56920a0286fSLiu Xiaofeng } 57020a0286fSLiu Xiaofeng 5713f7311baSWei Dai /* 5723f7311baSWei Dai * Get the allowed maximum number of RX queues. 5733f7311baSWei Dai * *pid return the port id which has minimal value of 5743f7311baSWei Dai * max_rx_queues in all ports. 5753f7311baSWei Dai */ 5763f7311baSWei Dai queueid_t 5773f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 5783f7311baSWei Dai { 5793f7311baSWei Dai queueid_t allowed_max_rxq = MAX_QUEUE_ID; 5803f7311baSWei Dai portid_t pi; 5813f7311baSWei Dai struct rte_eth_dev_info dev_info; 5823f7311baSWei Dai 5833f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 5843f7311baSWei Dai rte_eth_dev_info_get(pi, &dev_info); 5853f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 5863f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 5873f7311baSWei Dai *pid = pi; 5883f7311baSWei Dai } 5893f7311baSWei Dai } 5903f7311baSWei Dai return allowed_max_rxq; 5913f7311baSWei Dai } 5923f7311baSWei Dai 5933f7311baSWei Dai /* 5943f7311baSWei Dai * Check input rxq is valid or not. 5953f7311baSWei Dai * If input rxq is not greater than any of maximum number 5963f7311baSWei Dai * of RX queues of all ports, it is valid. 5973f7311baSWei Dai * if valid, return 0, else return -1 5983f7311baSWei Dai */ 5993f7311baSWei Dai int 6003f7311baSWei Dai check_nb_rxq(queueid_t rxq) 6013f7311baSWei Dai { 6023f7311baSWei Dai queueid_t allowed_max_rxq; 6033f7311baSWei Dai portid_t pid = 0; 6043f7311baSWei Dai 6053f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 6063f7311baSWei Dai if (rxq > allowed_max_rxq) { 6073f7311baSWei Dai printf("Fail: input rxq (%u) can't be greater " 6083f7311baSWei Dai "than max_rx_queues (%u) of port %u\n", 6093f7311baSWei Dai rxq, 6103f7311baSWei Dai allowed_max_rxq, 6113f7311baSWei Dai pid); 6123f7311baSWei Dai return -1; 6133f7311baSWei Dai } 6143f7311baSWei Dai return 0; 6153f7311baSWei Dai } 6163f7311baSWei Dai 61736db4f6cSWei Dai /* 61836db4f6cSWei Dai * Get the allowed maximum number of TX queues. 61936db4f6cSWei Dai * *pid return the port id which has minimal value of 62036db4f6cSWei Dai * max_tx_queues in all ports. 62136db4f6cSWei Dai */ 62236db4f6cSWei Dai queueid_t 62336db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 62436db4f6cSWei Dai { 62536db4f6cSWei Dai queueid_t allowed_max_txq = MAX_QUEUE_ID; 62636db4f6cSWei Dai portid_t pi; 62736db4f6cSWei Dai struct rte_eth_dev_info dev_info; 62836db4f6cSWei Dai 62936db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 63036db4f6cSWei Dai rte_eth_dev_info_get(pi, &dev_info); 63136db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 63236db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 63336db4f6cSWei Dai *pid = pi; 63436db4f6cSWei Dai } 63536db4f6cSWei Dai } 63636db4f6cSWei Dai return allowed_max_txq; 63736db4f6cSWei Dai } 63836db4f6cSWei Dai 63936db4f6cSWei Dai /* 64036db4f6cSWei Dai * Check input txq is valid or not. 64136db4f6cSWei Dai * If input txq is not greater than any of maximum number 64236db4f6cSWei Dai * of TX queues of all ports, it is valid. 
64336db4f6cSWei Dai * if valid, return 0, else return -1 64436db4f6cSWei Dai */ 64536db4f6cSWei Dai int 64636db4f6cSWei Dai check_nb_txq(queueid_t txq) 64736db4f6cSWei Dai { 64836db4f6cSWei Dai queueid_t allowed_max_txq; 64936db4f6cSWei Dai portid_t pid = 0; 65036db4f6cSWei Dai 65136db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 65236db4f6cSWei Dai if (txq > allowed_max_txq) { 65336db4f6cSWei Dai printf("Fail: input txq (%u) can't be greater " 65436db4f6cSWei Dai "than max_tx_queues (%u) of port %u\n", 65536db4f6cSWei Dai txq, 65636db4f6cSWei Dai allowed_max_txq, 65736db4f6cSWei Dai pid); 65836db4f6cSWei Dai return -1; 65936db4f6cSWei Dai } 66036db4f6cSWei Dai return 0; 66136db4f6cSWei Dai } 66236db4f6cSWei Dai 663af75078fSIntel static void 664af75078fSIntel init_config(void) 665af75078fSIntel { 666ce8d5614SIntel portid_t pid; 667af75078fSIntel struct rte_port *port; 668af75078fSIntel struct rte_mempool *mbp; 669af75078fSIntel unsigned int nb_mbuf_per_pool; 670af75078fSIntel lcoreid_t lc_id; 6717acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 672b7091f1dSJiayu Hu struct rte_gro_param gro_param; 67352f38a20SJiayu Hu uint32_t gso_types; 674c73a9071SWei Dai int k; 675af75078fSIntel 6767acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 677487f9a59SYulong Pei 678487f9a59SYulong Pei if (numa_support) { 679487f9a59SYulong Pei memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 680487f9a59SYulong Pei memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 681487f9a59SYulong Pei memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 682487f9a59SYulong Pei } 683487f9a59SYulong Pei 684af75078fSIntel /* Configuration of logical cores. */ 685af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 686af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 687fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 688af75078fSIntel if (fwd_lcores == NULL) { 689ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 690ce8d5614SIntel "failed\n", nb_lcores); 691af75078fSIntel } 692af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 693af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 694af75078fSIntel sizeof(struct fwd_lcore), 695fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 696af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 697ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 698ce8d5614SIntel "failed\n"); 699af75078fSIntel } 700af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 701af75078fSIntel } 702af75078fSIntel 7037d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 704ce8d5614SIntel port = &ports[pid]; 7058b9bd0efSMoti Haimovsky /* Apply default TxRx configuration for all ports */ 706fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 707384161e0SShahaf Shuler port->dev_conf.rxmode = rx_mode; 708ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 7097c45f6c0SFerruh Yigit 7107c45f6c0SFerruh Yigit if (!(port->dev_info.rx_offload_capa & 7117c45f6c0SFerruh Yigit DEV_RX_OFFLOAD_CRC_STRIP)) 7127c45f6c0SFerruh Yigit port->dev_conf.rxmode.offloads &= 7137c45f6c0SFerruh Yigit ~DEV_RX_OFFLOAD_CRC_STRIP; 71407e5f7bdSShahaf Shuler if (!(port->dev_info.tx_offload_capa & 71507e5f7bdSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 71607e5f7bdSShahaf Shuler port->dev_conf.txmode.offloads &= 71707e5f7bdSShahaf Shuler ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 718b6ea6408SIntel if (numa_support) { 719b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 720b6ea6408SIntel 
port_per_socket[port_numa[pid]]++; 721b6ea6408SIntel else { 722b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 72320a0286fSLiu Xiaofeng 72420a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 72520a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 72620a0286fSLiu Xiaofeng socket_id = 0; 727b6ea6408SIntel port_per_socket[socket_id]++; 728b6ea6408SIntel } 729b6ea6408SIntel } 730b6ea6408SIntel 731c73a9071SWei Dai /* Apply Rx offloads configuration */ 732c73a9071SWei Dai for (k = 0; k < port->dev_info.max_rx_queues; k++) 733c73a9071SWei Dai port->rx_conf[k].offloads = 734c73a9071SWei Dai port->dev_conf.rxmode.offloads; 735c73a9071SWei Dai /* Apply Tx offloads configuration */ 736c73a9071SWei Dai for (k = 0; k < port->dev_info.max_tx_queues; k++) 737c73a9071SWei Dai port->tx_conf[k].offloads = 738c73a9071SWei Dai port->dev_conf.txmode.offloads; 739c73a9071SWei Dai 740ce8d5614SIntel /* set flag to initialize port/queue */ 741ce8d5614SIntel port->need_reconfig = 1; 742ce8d5614SIntel port->need_reconfig_queues = 1; 743ce8d5614SIntel } 744ce8d5614SIntel 7453ab64341SOlivier Matz /* 7463ab64341SOlivier Matz * Create pools of mbuf. 7473ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 7483ab64341SOlivier Matz * socket 0 memory by default. 7493ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 7503ab64341SOlivier Matz * 7513ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 7523ab64341SOlivier Matz * nb_txd can be configured at run time. 7533ab64341SOlivier Matz */ 7543ab64341SOlivier Matz if (param_total_num_mbufs) 7553ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 7563ab64341SOlivier Matz else { 7573ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 7583ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 7593ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 7603ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 7613ab64341SOlivier Matz } 7623ab64341SOlivier Matz 763b6ea6408SIntel if (numa_support) { 764b6ea6408SIntel uint8_t i; 765ce8d5614SIntel 766c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 767c9cafcc8SShahaf Shuler mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 768c9cafcc8SShahaf Shuler socket_ids[i]); 7693ab64341SOlivier Matz } else { 7703ab64341SOlivier Matz if (socket_num == UMA_NO_CONFIG) 7713ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 7723ab64341SOlivier Matz else 7733ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 7743ab64341SOlivier Matz socket_num); 7753ab64341SOlivier Matz } 776b6ea6408SIntel 777b6ea6408SIntel init_port_config(); 7785886ae07SAdrien Mazarguil 77952f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 78052f38a20SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO; 7815886ae07SAdrien Mazarguil /* 7825886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
7835886ae07SAdrien Mazarguil */ 7845886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 7858fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 7868fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 7878fd8bebcSAdrien Mazarguil 7885886ae07SAdrien Mazarguil if (mbp == NULL) 7895886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 7905886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 79152f38a20SJiayu Hu /* initialize GSO context */ 79252f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 79352f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 79452f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 79552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 79652f38a20SJiayu Hu ETHER_CRC_LEN; 79752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 7985886ae07SAdrien Mazarguil } 7995886ae07SAdrien Mazarguil 800ce8d5614SIntel /* Configuration of packet forwarding streams. */ 801ce8d5614SIntel if (init_fwd_streams() < 0) 802ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 8030c0db76fSBernard Iremonger 8040c0db76fSBernard Iremonger fwd_config_setup(); 805b7091f1dSJiayu Hu 806b7091f1dSJiayu Hu /* create a gro context for each lcore */ 807b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 808b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 809b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 810b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 811b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 812b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 813b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 814b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 815b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 816b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 817b7091f1dSJiayu Hu } 818b7091f1dSJiayu Hu } 819ce8d5614SIntel } 820ce8d5614SIntel 8212950a769SDeclan Doherty 8222950a769SDeclan Doherty void 823a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 8242950a769SDeclan Doherty { 8252950a769SDeclan Doherty struct rte_port *port; 8262950a769SDeclan Doherty 8272950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
*/ 8282950a769SDeclan Doherty port = &ports[new_port_id]; 8292950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 8302950a769SDeclan Doherty 8312950a769SDeclan Doherty /* set flag to initialize port/queue */ 8322950a769SDeclan Doherty port->need_reconfig = 1; 8332950a769SDeclan Doherty port->need_reconfig_queues = 1; 834a21d5a4bSDeclan Doherty port->socket_id = socket_id; 8352950a769SDeclan Doherty 8362950a769SDeclan Doherty init_port_config(); 8372950a769SDeclan Doherty } 8382950a769SDeclan Doherty 8392950a769SDeclan Doherty 840ce8d5614SIntel int 841ce8d5614SIntel init_fwd_streams(void) 842ce8d5614SIntel { 843ce8d5614SIntel portid_t pid; 844ce8d5614SIntel struct rte_port *port; 845ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 8465a8fb55cSReshma Pattan queueid_t q; 847ce8d5614SIntel 848ce8d5614SIntel /* set socket id according to numa or not */ 8497d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 850ce8d5614SIntel port = &ports[pid]; 851ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 852ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 853ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 854ce8d5614SIntel port->dev_info.max_rx_queues); 855ce8d5614SIntel return -1; 856ce8d5614SIntel } 857ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 858ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 859ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 860ce8d5614SIntel port->dev_info.max_tx_queues); 861ce8d5614SIntel return -1; 862ce8d5614SIntel } 86320a0286fSLiu Xiaofeng if (numa_support) { 86420a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 86520a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 86620a0286fSLiu Xiaofeng else { 867b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 86820a0286fSLiu Xiaofeng 86920a0286fSLiu Xiaofeng /* if socket_id is invalid, set to 0 */ 87020a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 87120a0286fSLiu Xiaofeng port->socket_id = 0; 87220a0286fSLiu Xiaofeng } 87320a0286fSLiu Xiaofeng } 874b6ea6408SIntel else { 875b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 876af75078fSIntel port->socket_id = 0; 877b6ea6408SIntel else 878b6ea6408SIntel port->socket_id = socket_num; 879b6ea6408SIntel } 880af75078fSIntel } 881af75078fSIntel 8825a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 8835a8fb55cSReshma Pattan if (q == 0) { 8845a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 8855a8fb55cSReshma Pattan return -1; 8865a8fb55cSReshma Pattan } 8875a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 888ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 889ce8d5614SIntel return 0; 890ce8d5614SIntel /* clear the old */ 891ce8d5614SIntel if (fwd_streams != NULL) { 892ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 893ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 894ce8d5614SIntel continue; 895ce8d5614SIntel rte_free(fwd_streams[sm_id]); 896ce8d5614SIntel fwd_streams[sm_id] = NULL; 897af75078fSIntel } 898ce8d5614SIntel rte_free(fwd_streams); 899ce8d5614SIntel fwd_streams = NULL; 900ce8d5614SIntel } 901ce8d5614SIntel 902ce8d5614SIntel /* init new */ 903ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 9041f84c469SMatan Azrad if (nb_fwd_streams) { 905ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 9061f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 9071f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 908ce8d5614SIntel if (fwd_streams == NULL) 9091f84c469SMatan Azrad 
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 9101f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 9111f84c469SMatan Azrad nb_fwd_streams); 912ce8d5614SIntel 913af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 9141f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 9151f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 9161f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 917ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 9181f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 9191f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 9201f84c469SMatan Azrad } 921af75078fSIntel } 922ce8d5614SIntel 923ce8d5614SIntel return 0; 924af75078fSIntel } 925af75078fSIntel 926af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 927af75078fSIntel static void 928af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 929af75078fSIntel { 930af75078fSIntel unsigned int total_burst; 931af75078fSIntel unsigned int nb_burst; 932af75078fSIntel unsigned int burst_stats[3]; 933af75078fSIntel uint16_t pktnb_stats[3]; 934af75078fSIntel uint16_t nb_pkt; 935af75078fSIntel int burst_percent[3]; 936af75078fSIntel 937af75078fSIntel /* 938af75078fSIntel * First compute the total number of packet bursts and the 939af75078fSIntel * two highest numbers of bursts of the same number of packets. 940af75078fSIntel */ 941af75078fSIntel total_burst = 0; 942af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 943af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 944af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 945af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 946af75078fSIntel if (nb_burst == 0) 947af75078fSIntel continue; 948af75078fSIntel total_burst += nb_burst; 949af75078fSIntel if (nb_burst > burst_stats[0]) { 950af75078fSIntel burst_stats[1] = burst_stats[0]; 951af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 952af75078fSIntel burst_stats[0] = nb_burst; 953af75078fSIntel pktnb_stats[0] = nb_pkt; 954*fe613657SDaniel Shelepov } else if (nb_burst > burst_stats[1]) { 955*fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 956*fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 957af75078fSIntel } 958af75078fSIntel } 959af75078fSIntel if (total_burst == 0) 960af75078fSIntel return; 961af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 962af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 963af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 964af75078fSIntel if (burst_stats[0] == total_burst) { 965af75078fSIntel printf("]\n"); 966af75078fSIntel return; 967af75078fSIntel } 968af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 969af75078fSIntel printf(" + %d%% of %d pkts]\n", 970af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 971af75078fSIntel return; 972af75078fSIntel } 973af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 974af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 975af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 976af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 977af75078fSIntel return; 978af75078fSIntel } 979af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 980af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 981af75078fSIntel } 982af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 983af75078fSIntel 984af75078fSIntel static void 985af75078fSIntel 
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) 986af75078fSIntel { 987af75078fSIntel struct rte_port *port; 988013af9b6SIntel uint8_t i; 989af75078fSIntel 990af75078fSIntel static const char *fwd_stats_border = "----------------------"; 991af75078fSIntel 992af75078fSIntel port = &ports[port_id]; 993af75078fSIntel printf("\n %s Forward statistics for port %-2d %s\n", 994af75078fSIntel fwd_stats_border, port_id, fwd_stats_border); 995013af9b6SIntel 996013af9b6SIntel if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { 997af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 998af75078fSIntel "%-"PRIu64"\n", 99970bdb186SIvan Boule stats->ipackets, stats->imissed, 100070bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 1001af75078fSIntel 1002af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) 1003af75078fSIntel printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", 1004af75078fSIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 100586057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 1006f72a0fa6SStephen Hemminger printf(" RX-error: %-"PRIu64"\n", stats->ierrors); 100770bdb186SIvan Boule printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); 100870bdb186SIvan Boule } 1009af75078fSIntel 1010af75078fSIntel printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1011af75078fSIntel "%-"PRIu64"\n", 1012af75078fSIntel stats->opackets, port->tx_dropped, 1013af75078fSIntel (uint64_t) (stats->opackets + port->tx_dropped)); 1014013af9b6SIntel } 1015013af9b6SIntel else { 1016013af9b6SIntel printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" 1017013af9b6SIntel "%14"PRIu64"\n", 101870bdb186SIvan Boule stats->ipackets, stats->imissed, 101970bdb186SIvan Boule (uint64_t) (stats->ipackets + stats->imissed)); 1020013af9b6SIntel 1021013af9b6SIntel if (cur_fwd_eng == &csum_fwd_engine) 1022013af9b6SIntel printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", 1023013af9b6SIntel port->rx_bad_ip_csum, port->rx_bad_l4_csum); 102486057c99SIgor Ryzhov if ((stats->ierrors + stats->rx_nombuf) > 0) { 1025f72a0fa6SStephen Hemminger printf(" RX-error:%"PRIu64"\n", stats->ierrors); 102670bdb186SIvan Boule printf(" RX-nombufs: %14"PRIu64"\n", 102770bdb186SIvan Boule stats->rx_nombuf); 102870bdb186SIvan Boule } 1029013af9b6SIntel 1030013af9b6SIntel printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" 1031013af9b6SIntel "%14"PRIu64"\n", 1032013af9b6SIntel stats->opackets, port->tx_dropped, 1033013af9b6SIntel (uint64_t) (stats->opackets + port->tx_dropped)); 1034013af9b6SIntel } 1035e659b6b4SIvan Boule 1036af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1037af75078fSIntel if (port->rx_stream) 1038013af9b6SIntel pkt_burst_stats_display("RX", 1039013af9b6SIntel &port->rx_stream->rx_burst_stats); 1040af75078fSIntel if (port->tx_stream) 1041013af9b6SIntel pkt_burst_stats_display("TX", 1042013af9b6SIntel &port->tx_stream->tx_burst_stats); 1043af75078fSIntel #endif 1044af75078fSIntel 1045013af9b6SIntel if (port->rx_queue_stats_mapping_enabled) { 1046013af9b6SIntel printf("\n"); 1047013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1048013af9b6SIntel printf(" Stats reg %2d RX-packets:%14"PRIu64 1049013af9b6SIntel " RX-errors:%14"PRIu64 1050013af9b6SIntel " RX-bytes:%14"PRIu64"\n", 1051013af9b6SIntel i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); 1052013af9b6SIntel } 1053013af9b6SIntel printf("\n"); 1054013af9b6SIntel } 
1055013af9b6SIntel if (port->tx_queue_stats_mapping_enabled) { 1056013af9b6SIntel for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 1057013af9b6SIntel printf(" Stats reg %2d TX-packets:%14"PRIu64 1058013af9b6SIntel " TX-bytes:%14"PRIu64"\n", 1059013af9b6SIntel i, stats->q_opackets[i], stats->q_obytes[i]); 1060013af9b6SIntel } 1061013af9b6SIntel } 1062013af9b6SIntel 1063af75078fSIntel printf(" %s--------------------------------%s\n", 1064af75078fSIntel fwd_stats_border, fwd_stats_border); 1065af75078fSIntel } 1066af75078fSIntel 1067af75078fSIntel static void 1068af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1069af75078fSIntel { 1070af75078fSIntel struct fwd_stream *fs; 1071af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1072af75078fSIntel 1073af75078fSIntel fs = fwd_streams[stream_id]; 1074af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1075af75078fSIntel (fs->fwd_dropped == 0)) 1076af75078fSIntel return; 1077af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1078af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1079af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1080af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1081af75078fSIntel printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", 1082af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1083af75078fSIntel 1084af75078fSIntel /* if checksum mode */ 1085af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1086013af9b6SIntel printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " 1087013af9b6SIntel "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); 1088af75078fSIntel } 1089af75078fSIntel 1090af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1091af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1092af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1093af75078fSIntel #endif 1094af75078fSIntel } 1095af75078fSIntel 1096af75078fSIntel static void 10977741e4cfSIntel flush_fwd_rx_queues(void) 1098af75078fSIntel { 1099af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1100af75078fSIntel portid_t rxp; 11017741e4cfSIntel portid_t port_id; 1102af75078fSIntel queueid_t rxq; 1103af75078fSIntel uint16_t nb_rx; 1104af75078fSIntel uint16_t i; 1105af75078fSIntel uint8_t j; 1106f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1107594302c7SJames Poole uint64_t timer_period; 1108f487715fSReshma Pattan 1109f487715fSReshma Pattan /* convert to number of cycles */ 1110594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1111af75078fSIntel 1112af75078fSIntel for (j = 0; j < 2; j++) { 11137741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1114af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 11157741e4cfSIntel port_id = fwd_ports_ids[rxp]; 1116f487715fSReshma Pattan /** 1117f487715fSReshma Pattan * testpmd can stuck in the below do while loop 1118f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero 1119f487715fSReshma Pattan * packets. So timer is added to exit this loop 1120f487715fSReshma Pattan * after 1sec timer expiry. 
1121f487715fSReshma Pattan */ 1122f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 1123af75078fSIntel do { 11247741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 1125013af9b6SIntel pkts_burst, MAX_PKT_BURST); 1126af75078fSIntel for (i = 0; i < nb_rx; i++) 1127af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 1128f487715fSReshma Pattan 1129f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 1130f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 1131f487715fSReshma Pattan timer_tsc += diff_tsc; 1132f487715fSReshma Pattan } while ((nb_rx > 0) && 1133f487715fSReshma Pattan (timer_tsc < timer_period)); 1134f487715fSReshma Pattan timer_tsc = 0; 1135af75078fSIntel } 1136af75078fSIntel } 1137af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1138af75078fSIntel } 1139af75078fSIntel } 1140af75078fSIntel 1141af75078fSIntel static void 1142af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1143af75078fSIntel { 1144af75078fSIntel struct fwd_stream **fsm; 1145af75078fSIntel streamid_t nb_fs; 1146af75078fSIntel streamid_t sm_id; 11477e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 11487e4441c8SRemy Horton uint64_t tics_per_1sec; 11497e4441c8SRemy Horton uint64_t tics_datum; 11507e4441c8SRemy Horton uint64_t tics_current; 11518728ccf3SThomas Monjalon uint16_t idx_port; 1152af75078fSIntel 11537e4441c8SRemy Horton tics_datum = rte_rdtsc(); 11547e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 11557e4441c8SRemy Horton #endif 1156af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 1157af75078fSIntel nb_fs = fc->stream_nb; 1158af75078fSIntel do { 1159af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 1160af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 11617e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 1162e25e6c70SRemy Horton if (bitrate_enabled != 0 && 1163e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 11647e4441c8SRemy Horton tics_current = rte_rdtsc(); 11657e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 11667e4441c8SRemy Horton /* Periodic bitrate calculation */ 11678728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(idx_port) 1168e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 1169e25e6c70SRemy Horton idx_port); 11707e4441c8SRemy Horton tics_datum = tics_current; 11717e4441c8SRemy Horton } 1172e25e6c70SRemy Horton } 11737e4441c8SRemy Horton #endif 117462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 117565eb1e54SPablo de Lara if (latencystats_enabled != 0 && 117665eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 117762d3216dSReshma Pattan rte_latencystats_update(); 117862d3216dSReshma Pattan #endif 117962d3216dSReshma Pattan 1180af75078fSIntel } while (! fc->stopped); 1181af75078fSIntel } 1182af75078fSIntel 1183af75078fSIntel static int 1184af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 1185af75078fSIntel { 1186af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1187af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 1188af75078fSIntel return 0; 1189af75078fSIntel } 1190af75078fSIntel 1191af75078fSIntel /* 1192af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 1193af75078fSIntel * Used to start communication flows in network loopback test configurations. 
1194af75078fSIntel */ 1195af75078fSIntel static int 1196af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 1197af75078fSIntel { 1198af75078fSIntel struct fwd_lcore *fwd_lc; 1199af75078fSIntel struct fwd_lcore tmp_lcore; 1200af75078fSIntel 1201af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 1202af75078fSIntel tmp_lcore = *fwd_lc; 1203af75078fSIntel tmp_lcore.stopped = 1; 1204af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1205af75078fSIntel return 0; 1206af75078fSIntel } 1207af75078fSIntel 1208af75078fSIntel /* 1209af75078fSIntel * Launch packet forwarding: 1210af75078fSIntel * - Setup per-port forwarding context. 1211af75078fSIntel * - launch logical cores with their forwarding configuration. 1212af75078fSIntel */ 1213af75078fSIntel static void 1214af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1215af75078fSIntel { 1216af75078fSIntel port_fwd_begin_t port_fwd_begin; 1217af75078fSIntel unsigned int i; 1218af75078fSIntel unsigned int lc_id; 1219af75078fSIntel int diag; 1220af75078fSIntel 1221af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1222af75078fSIntel if (port_fwd_begin != NULL) { 1223af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1224af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1225af75078fSIntel } 1226af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1227af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1228af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1229af75078fSIntel fwd_lcores[i]->stopped = 0; 1230af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1231af75078fSIntel fwd_lcores[i], lc_id); 1232af75078fSIntel if (diag != 0) 1233af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1234af75078fSIntel lc_id, diag); 1235af75078fSIntel } 1236af75078fSIntel } 1237af75078fSIntel } 1238af75078fSIntel 1239af75078fSIntel /* 124003ce2c53SMatan Azrad * Update the forward ports list. 124103ce2c53SMatan Azrad */ 124203ce2c53SMatan Azrad void 124303ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid) 124403ce2c53SMatan Azrad { 124503ce2c53SMatan Azrad unsigned int i; 124603ce2c53SMatan Azrad unsigned int new_nb_fwd_ports = 0; 124703ce2c53SMatan Azrad int move = 0; 124803ce2c53SMatan Azrad 124903ce2c53SMatan Azrad for (i = 0; i < nb_fwd_ports; ++i) { 125003ce2c53SMatan Azrad if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) 125103ce2c53SMatan Azrad move = 1; 125203ce2c53SMatan Azrad else if (move) 125303ce2c53SMatan Azrad fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; 125403ce2c53SMatan Azrad else 125503ce2c53SMatan Azrad new_nb_fwd_ports++; 125603ce2c53SMatan Azrad } 125703ce2c53SMatan Azrad if (new_pid < RTE_MAX_ETHPORTS) 125803ce2c53SMatan Azrad fwd_ports_ids[new_nb_fwd_ports++] = new_pid; 125903ce2c53SMatan Azrad 126003ce2c53SMatan Azrad nb_fwd_ports = new_nb_fwd_ports; 126103ce2c53SMatan Azrad nb_cfg_ports = new_nb_fwd_ports; 126203ce2c53SMatan Azrad } 126303ce2c53SMatan Azrad 126403ce2c53SMatan Azrad /* 1265af75078fSIntel * Launch packet forwarding configuration. 
1266af75078fSIntel */ 1267af75078fSIntel void 1268af75078fSIntel start_packet_forwarding(int with_tx_first) 1269af75078fSIntel { 1270af75078fSIntel port_fwd_begin_t port_fwd_begin; 1271af75078fSIntel port_fwd_end_t port_fwd_end; 1272af75078fSIntel struct rte_port *port; 1273af75078fSIntel unsigned int i; 1274af75078fSIntel portid_t pt_id; 1275af75078fSIntel streamid_t sm_id; 1276af75078fSIntel 12775a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 12785a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 12795a8fb55cSReshma Pattan 12805a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 12815a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 12825a8fb55cSReshma Pattan 12835a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 12845a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 12855a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 12865a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 12875a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 12885a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 12895a8fb55cSReshma Pattan 1290ce8d5614SIntel if (all_ports_started() == 0) { 1291ce8d5614SIntel printf("Not all ports were started\n"); 1292ce8d5614SIntel return; 1293ce8d5614SIntel } 1294af75078fSIntel if (test_done == 0) { 1295af75078fSIntel printf("Packet forwarding already started\n"); 1296af75078fSIntel return; 1297af75078fSIntel } 1298edf87b4aSBernard Iremonger 1299edf87b4aSBernard Iremonger 13007741e4cfSIntel if(dcb_test) { 13017741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) { 13027741e4cfSIntel pt_id = fwd_ports_ids[i]; 13037741e4cfSIntel port = &ports[pt_id]; 13047741e4cfSIntel if (!port->dcb_flag) { 13057741e4cfSIntel printf("In DCB mode, all forwarding ports must " 13067741e4cfSIntel "be configured in this mode.\n"); 1307013af9b6SIntel return; 1308013af9b6SIntel } 13097741e4cfSIntel } 13107741e4cfSIntel if (nb_fwd_lcores == 1) { 13117741e4cfSIntel printf("In DCB mode,the nb forwarding cores " 13127741e4cfSIntel "should be larger than 1.\n"); 13137741e4cfSIntel return; 13147741e4cfSIntel } 13157741e4cfSIntel } 1316af75078fSIntel test_done = 0; 13177741e4cfSIntel 131847a767b2SMatan Azrad fwd_config_setup(); 131947a767b2SMatan Azrad 13207741e4cfSIntel if(!no_flush_rx) 13217741e4cfSIntel flush_fwd_rx_queues(); 13227741e4cfSIntel 1323933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config); 1324af75078fSIntel rxtx_config_display(); 1325af75078fSIntel 1326af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1327af75078fSIntel pt_id = fwd_ports_ids[i]; 1328af75078fSIntel port = &ports[pt_id]; 1329af75078fSIntel rte_eth_stats_get(pt_id, &port->stats); 1330af75078fSIntel port->tx_dropped = 0; 1331013af9b6SIntel 1332013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port); 1333af75078fSIntel } 1334af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1335af75078fSIntel fwd_streams[sm_id]->rx_packets = 0; 1336af75078fSIntel fwd_streams[sm_id]->tx_packets = 0; 1337af75078fSIntel fwd_streams[sm_id]->fwd_dropped = 0; 1338af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum = 0; 1339af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum = 0; 1340af75078fSIntel 1341af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1342af75078fSIntel memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1343af75078fSIntel 
sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1344af75078fSIntel memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1345af75078fSIntel sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1346af75078fSIntel #endif 1347af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1348af75078fSIntel fwd_streams[sm_id]->core_cycles = 0; 1349af75078fSIntel #endif 1350af75078fSIntel } 1351af75078fSIntel if (with_tx_first) { 1352af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1353af75078fSIntel if (port_fwd_begin != NULL) { 1354af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1355af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1356af75078fSIntel } 1357acbf77a6SZhihong Wang while (with_tx_first--) { 1358acbf77a6SZhihong Wang launch_packet_forwarding( 1359acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 1360af75078fSIntel rte_eal_mp_wait_lcore(); 1361acbf77a6SZhihong Wang } 1362af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1363af75078fSIntel if (port_fwd_end != NULL) { 1364af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1365af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1366af75078fSIntel } 1367af75078fSIntel } 1368af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1369af75078fSIntel } 1370af75078fSIntel 1371af75078fSIntel void 1372af75078fSIntel stop_packet_forwarding(void) 1373af75078fSIntel { 1374af75078fSIntel struct rte_eth_stats stats; 1375af75078fSIntel struct rte_port *port; 1376af75078fSIntel port_fwd_end_t port_fwd_end; 1377af75078fSIntel int i; 1378af75078fSIntel portid_t pt_id; 1379af75078fSIntel streamid_t sm_id; 1380af75078fSIntel lcoreid_t lc_id; 1381af75078fSIntel uint64_t total_recv; 1382af75078fSIntel uint64_t total_xmit; 1383af75078fSIntel uint64_t total_rx_dropped; 1384af75078fSIntel uint64_t total_tx_dropped; 1385af75078fSIntel uint64_t total_rx_nombuf; 1386af75078fSIntel uint64_t tx_dropped; 1387af75078fSIntel uint64_t rx_bad_ip_csum; 1388af75078fSIntel uint64_t rx_bad_l4_csum; 1389af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1390af75078fSIntel uint64_t fwd_cycles; 1391af75078fSIntel #endif 1392b7091f1dSJiayu Hu 1393af75078fSIntel static const char *acc_stats_border = "+++++++++++++++"; 1394af75078fSIntel 1395af75078fSIntel if (test_done) { 1396af75078fSIntel printf("Packet forwarding not started\n"); 1397af75078fSIntel return; 1398af75078fSIntel } 1399af75078fSIntel printf("Telling cores to stop..."); 1400af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1401af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1402af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1403af75078fSIntel rte_eal_mp_wait_lcore(); 1404af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1405af75078fSIntel if (port_fwd_end != NULL) { 1406af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1407af75078fSIntel pt_id = fwd_ports_ids[i]; 1408af75078fSIntel (*port_fwd_end)(pt_id); 1409af75078fSIntel } 1410af75078fSIntel } 1411af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1412af75078fSIntel fwd_cycles = 0; 1413af75078fSIntel #endif 1414af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1415af75078fSIntel if (cur_fwd_config.nb_fwd_streams > 1416af75078fSIntel cur_fwd_config.nb_fwd_ports) { 1417af75078fSIntel fwd_stream_stats_display(sm_id); 1418af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1419af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1420af75078fSIntel } else { 1421af75078fSIntel 
ports[fwd_streams[sm_id]->tx_port].tx_stream = 1422af75078fSIntel fwd_streams[sm_id]; 1423af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = 1424af75078fSIntel fwd_streams[sm_id]; 1425af75078fSIntel } 1426af75078fSIntel tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1427af75078fSIntel tx_dropped = (uint64_t) (tx_dropped + 1428af75078fSIntel fwd_streams[sm_id]->fwd_dropped); 1429af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1430af75078fSIntel 1431013af9b6SIntel rx_bad_ip_csum = 1432013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1433af75078fSIntel rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1434af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum); 1435013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1436013af9b6SIntel rx_bad_ip_csum; 1437af75078fSIntel 1438013af9b6SIntel rx_bad_l4_csum = 1439013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1440af75078fSIntel rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1441af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum); 1442013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1443013af9b6SIntel rx_bad_l4_csum; 1444af75078fSIntel 1445af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1446af75078fSIntel fwd_cycles = (uint64_t) (fwd_cycles + 1447af75078fSIntel fwd_streams[sm_id]->core_cycles); 1448af75078fSIntel #endif 1449af75078fSIntel } 1450af75078fSIntel total_recv = 0; 1451af75078fSIntel total_xmit = 0; 1452af75078fSIntel total_rx_dropped = 0; 1453af75078fSIntel total_tx_dropped = 0; 1454af75078fSIntel total_rx_nombuf = 0; 14557741e4cfSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1456af75078fSIntel pt_id = fwd_ports_ids[i]; 1457af75078fSIntel 1458af75078fSIntel port = &ports[pt_id]; 1459af75078fSIntel rte_eth_stats_get(pt_id, &stats); 1460af75078fSIntel stats.ipackets -= port->stats.ipackets; 1461af75078fSIntel port->stats.ipackets = 0; 1462af75078fSIntel stats.opackets -= port->stats.opackets; 1463af75078fSIntel port->stats.opackets = 0; 1464af75078fSIntel stats.ibytes -= port->stats.ibytes; 1465af75078fSIntel port->stats.ibytes = 0; 1466af75078fSIntel stats.obytes -= port->stats.obytes; 1467af75078fSIntel port->stats.obytes = 0; 146870bdb186SIvan Boule stats.imissed -= port->stats.imissed; 146970bdb186SIvan Boule port->stats.imissed = 0; 1470af75078fSIntel stats.oerrors -= port->stats.oerrors; 1471af75078fSIntel port->stats.oerrors = 0; 1472af75078fSIntel stats.rx_nombuf -= port->stats.rx_nombuf; 1473af75078fSIntel port->stats.rx_nombuf = 0; 1474af75078fSIntel 1475af75078fSIntel total_recv += stats.ipackets; 1476af75078fSIntel total_xmit += stats.opackets; 147770bdb186SIvan Boule total_rx_dropped += stats.imissed; 1478af75078fSIntel total_tx_dropped += port->tx_dropped; 1479af75078fSIntel total_rx_nombuf += stats.rx_nombuf; 1480af75078fSIntel 1481af75078fSIntel fwd_port_stats_display(pt_id, &stats); 1482af75078fSIntel } 1483b7091f1dSJiayu Hu 1484af75078fSIntel printf("\n %s Accumulated forward statistics for all ports" 1485af75078fSIntel "%s\n", 1486af75078fSIntel acc_stats_border, acc_stats_border); 1487af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1488af75078fSIntel "%-"PRIu64"\n" 1489af75078fSIntel " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1490af75078fSIntel "%-"PRIu64"\n", 1491af75078fSIntel total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1492af75078fSIntel total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1493af75078fSIntel if 
(total_rx_nombuf > 0) 1494af75078fSIntel printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1495af75078fSIntel printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1496af75078fSIntel "%s\n", 1497af75078fSIntel acc_stats_border, acc_stats_border); 1498af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1499af75078fSIntel if (total_recv > 0) 1500af75078fSIntel printf("\n CPU cycles/packet=%u (total cycles=" 1501af75078fSIntel "%"PRIu64" / total RX packets=%"PRIu64")\n", 1502af75078fSIntel (unsigned int)(fwd_cycles / total_recv), 1503af75078fSIntel fwd_cycles, total_recv); 1504af75078fSIntel #endif 1505af75078fSIntel printf("\nDone.\n"); 1506af75078fSIntel test_done = 1; 1507af75078fSIntel } 1508af75078fSIntel 1509cfae07fdSOuyang Changchun void 1510cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1511cfae07fdSOuyang Changchun { 1512492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1513cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1514cfae07fdSOuyang Changchun } 1515cfae07fdSOuyang Changchun 1516cfae07fdSOuyang Changchun void 1517cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1518cfae07fdSOuyang Changchun { 1519492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1520cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1521cfae07fdSOuyang Changchun } 1522cfae07fdSOuyang Changchun 1523ce8d5614SIntel static int 1524ce8d5614SIntel all_ports_started(void) 1525ce8d5614SIntel { 1526ce8d5614SIntel portid_t pi; 1527ce8d5614SIntel struct rte_port *port; 1528ce8d5614SIntel 15297d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1530ce8d5614SIntel port = &ports[pi]; 1531ce8d5614SIntel /* Check if there is a port which is not started */ 153241b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 153341b05095SBernard Iremonger (port->slave_flag == 0)) 1534ce8d5614SIntel return 0; 1535ce8d5614SIntel } 1536ce8d5614SIntel 1537ce8d5614SIntel /* No port is not started */ 1538ce8d5614SIntel return 1; 1539ce8d5614SIntel } 1540ce8d5614SIntel 1541148f963fSBruce Richardson int 15426018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 15436018eb8cSShahaf Shuler { 15446018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 15456018eb8cSShahaf Shuler 15466018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 15476018eb8cSShahaf Shuler (port->slave_flag == 0)) 15486018eb8cSShahaf Shuler return 0; 15496018eb8cSShahaf Shuler return 1; 15506018eb8cSShahaf Shuler } 15516018eb8cSShahaf Shuler 15526018eb8cSShahaf Shuler int 1553edab33b1STetsuya Mukawa all_ports_stopped(void) 1554edab33b1STetsuya Mukawa { 1555edab33b1STetsuya Mukawa portid_t pi; 1556edab33b1STetsuya Mukawa 15577d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 15586018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 1559edab33b1STetsuya Mukawa return 0; 1560edab33b1STetsuya Mukawa } 1561edab33b1STetsuya Mukawa 1562edab33b1STetsuya Mukawa return 1; 1563edab33b1STetsuya Mukawa } 1564edab33b1STetsuya Mukawa 1565edab33b1STetsuya Mukawa int 1566edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 1567edab33b1STetsuya Mukawa { 1568edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1569edab33b1STetsuya Mukawa return 0; 1570edab33b1STetsuya Mukawa 1571edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 1572edab33b1STetsuya Mukawa return 0; 1573edab33b1STetsuya Mukawa 1574edab33b1STetsuya Mukawa return 1; 1575edab33b1STetsuya Mukawa } 1576edab33b1STetsuya Mukawa 1577edab33b1STetsuya Mukawa static int 
1578edab33b1STetsuya Mukawa port_is_closed(portid_t port_id) 1579edab33b1STetsuya Mukawa { 1580edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1581edab33b1STetsuya Mukawa return 0; 1582edab33b1STetsuya Mukawa 1583edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1584edab33b1STetsuya Mukawa return 0; 1585edab33b1STetsuya Mukawa 1586edab33b1STetsuya Mukawa return 1; 1587edab33b1STetsuya Mukawa } 1588edab33b1STetsuya Mukawa 1589edab33b1STetsuya Mukawa int 1590ce8d5614SIntel start_port(portid_t pid) 1591ce8d5614SIntel { 159292d2703eSMichael Qiu int diag, need_check_link_status = -1; 1593ce8d5614SIntel portid_t pi; 1594ce8d5614SIntel queueid_t qi; 1595ce8d5614SIntel struct rte_port *port; 15962950a769SDeclan Doherty struct ether_addr mac_addr; 159776ad4a2dSGaetan Rivet enum rte_eth_event_type event_type; 1598ce8d5614SIntel 15994468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 16004468635fSMichael Qiu return 0; 16014468635fSMichael Qiu 1602ce8d5614SIntel if(dcb_config) 1603ce8d5614SIntel dcb_test = 1; 16047d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1605edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1606ce8d5614SIntel continue; 1607ce8d5614SIntel 160892d2703eSMichael Qiu need_check_link_status = 0; 1609ce8d5614SIntel port = &ports[pi]; 1610ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1611ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1612ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1613ce8d5614SIntel continue; 1614ce8d5614SIntel } 1615ce8d5614SIntel 1616ce8d5614SIntel if (port->need_reconfig > 0) { 1617ce8d5614SIntel port->need_reconfig = 0; 1618ce8d5614SIntel 16197ee3e944SVasily Philipov if (flow_isolate_all) { 16207ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 16217ee3e944SVasily Philipov if (ret) { 16227ee3e944SVasily Philipov printf("Failed to apply isolated" 16237ee3e944SVasily Philipov " mode on port %d\n", pi); 16247ee3e944SVasily Philipov return -1; 16257ee3e944SVasily Philipov } 16267ee3e944SVasily Philipov } 16277ee3e944SVasily Philipov 16285706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 162920a0286fSLiu Xiaofeng port->socket_id); 1630ce8d5614SIntel /* configure port */ 1631ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1632ce8d5614SIntel &(port->dev_conf)); 1633ce8d5614SIntel if (diag != 0) { 1634ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1635ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1636ce8d5614SIntel printf("Port %d can not be set back " 1637ce8d5614SIntel "to stopped\n", pi); 1638ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1639ce8d5614SIntel /* try to reconfigure port next time */ 1640ce8d5614SIntel port->need_reconfig = 1; 1641148f963fSBruce Richardson return -1; 1642ce8d5614SIntel } 1643ce8d5614SIntel } 1644ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1645ce8d5614SIntel port->need_reconfig_queues = 0; 1646ce8d5614SIntel /* setup tx queues */ 1647ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1648d44f8a48SQi Zhang port->tx_conf[qi].txq_flags = 1649d44f8a48SQi Zhang ETH_TXQ_FLAGS_IGNORE; 1650b6ea6408SIntel if ((numa_support) && 1651b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 1652b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1653d44f8a48SQi Zhang port->nb_tx_desc[qi], 1654d44f8a48SQi Zhang txring_numa[pi], 1655d44f8a48SQi Zhang &(port->tx_conf[qi])); 1656b6ea6408SIntel else 1657b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, 
qi, 1658d44f8a48SQi Zhang port->nb_tx_desc[qi], 1659d44f8a48SQi Zhang port->socket_id, 1660d44f8a48SQi Zhang &(port->tx_conf[qi])); 1661b6ea6408SIntel 1662ce8d5614SIntel if (diag == 0) 1663ce8d5614SIntel continue; 1664ce8d5614SIntel 1665ce8d5614SIntel /* Fail to setup tx queue, return */ 1666ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1667ce8d5614SIntel RTE_PORT_HANDLING, 1668ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1669ce8d5614SIntel printf("Port %d can not be set back " 1670ce8d5614SIntel "to stopped\n", pi); 1671d44f8a48SQi Zhang printf("Fail to configure port %d tx queues\n", 1672d44f8a48SQi Zhang pi); 1673ce8d5614SIntel /* try to reconfigure queues next time */ 1674ce8d5614SIntel port->need_reconfig_queues = 1; 1675148f963fSBruce Richardson return -1; 1676ce8d5614SIntel } 1677ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1678d44f8a48SQi Zhang /* setup rx queues */ 1679b6ea6408SIntel if ((numa_support) && 1680b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1681b6ea6408SIntel struct rte_mempool * mp = 1682b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1683b6ea6408SIntel if (mp == NULL) { 1684b6ea6408SIntel printf("Failed to setup RX queue:" 1685b6ea6408SIntel "No mempool allocation" 1686b6ea6408SIntel " on the socket %d\n", 1687b6ea6408SIntel rxring_numa[pi]); 1688148f963fSBruce Richardson return -1; 1689b6ea6408SIntel } 1690b6ea6408SIntel 1691b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1692d44f8a48SQi Zhang port->nb_rx_desc[pi], 1693d44f8a48SQi Zhang rxring_numa[pi], 1694d44f8a48SQi Zhang &(port->rx_conf[qi]), 1695d44f8a48SQi Zhang mp); 16961e1d6bddSBernard Iremonger } else { 16971e1d6bddSBernard Iremonger struct rte_mempool *mp = 16981e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 16991e1d6bddSBernard Iremonger if (mp == NULL) { 17001e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 17011e1d6bddSBernard Iremonger "No mempool allocation" 17021e1d6bddSBernard Iremonger " on the socket %d\n", 17031e1d6bddSBernard Iremonger port->socket_id); 17041e1d6bddSBernard Iremonger return -1; 1705b6ea6408SIntel } 1706b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1707d44f8a48SQi Zhang port->nb_rx_desc[pi], 1708d44f8a48SQi Zhang port->socket_id, 1709d44f8a48SQi Zhang &(port->rx_conf[qi]), 1710d44f8a48SQi Zhang mp); 17111e1d6bddSBernard Iremonger } 1712ce8d5614SIntel if (diag == 0) 1713ce8d5614SIntel continue; 1714ce8d5614SIntel 1715ce8d5614SIntel /* Fail to setup rx queue, return */ 1716ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1717ce8d5614SIntel RTE_PORT_HANDLING, 1718ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1719ce8d5614SIntel printf("Port %d can not be set back " 1720ce8d5614SIntel "to stopped\n", pi); 1721d44f8a48SQi Zhang printf("Fail to configure port %d rx queues\n", 1722d44f8a48SQi Zhang pi); 1723ce8d5614SIntel /* try to reconfigure queues next time */ 1724ce8d5614SIntel port->need_reconfig_queues = 1; 1725148f963fSBruce Richardson return -1; 1726ce8d5614SIntel } 1727ce8d5614SIntel } 172876ad4a2dSGaetan Rivet 1729ce8d5614SIntel /* start port */ 1730ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1731ce8d5614SIntel printf("Fail to start port %d\n", pi); 1732ce8d5614SIntel 1733ce8d5614SIntel /* Fail to setup rx queue, return */ 1734ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1735ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1736ce8d5614SIntel printf("Port %d can not be set back to " 1737ce8d5614SIntel "stopped\n", pi); 1738ce8d5614SIntel continue; 1739ce8d5614SIntel } 1740ce8d5614SIntel 
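/*
 * Note: the port bring-up above follows the usual ethdev order:
 * rte_eth_dev_configure(), then rte_eth_tx_queue_setup() and
 * rte_eth_rx_queue_setup() for every configured queue, then
 * rte_eth_dev_start(). A minimal sketch of that order for a single
 * queue pair, assuming an existing mempool "mb_pool" and default
 * queue configuration (the descriptor count 512 is illustrative):
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_configure(pi, 1, 1, &conf) != 0 ||
 *	    rte_eth_tx_queue_setup(pi, 0, 512,
 *			rte_eth_dev_socket_id(pi), NULL) != 0 ||
 *	    rte_eth_rx_queue_setup(pi, 0, 512,
 *			rte_eth_dev_socket_id(pi), NULL, mb_pool) != 0 ||
 *	    rte_eth_dev_start(pi) != 0)
 *		printf("Port %u bring-up failed\n", pi);
 */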
1741ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1742ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 1743ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 1744ce8d5614SIntel 17452950a769SDeclan Doherty rte_eth_macaddr_get(pi, &mac_addr); 1746d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 17472950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 17482950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 17492950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 1750d8c89163SZijie Pan 1751ce8d5614SIntel /* at least one port started, need checking link status */ 1752ce8d5614SIntel need_check_link_status = 1; 1753ce8d5614SIntel } 1754ce8d5614SIntel 17554fb82244SMatan Azrad for (event_type = RTE_ETH_EVENT_UNKNOWN; 17564fb82244SMatan Azrad event_type < RTE_ETH_EVENT_MAX; 17574fb82244SMatan Azrad event_type++) { 17584fb82244SMatan Azrad diag = rte_eth_dev_callback_register(RTE_ETH_ALL, 17594fb82244SMatan Azrad event_type, 17604fb82244SMatan Azrad eth_event_callback, 17614fb82244SMatan Azrad NULL); 17624fb82244SMatan Azrad if (diag) { 17634fb82244SMatan Azrad printf("Failed to setup event callback for event %d\n", 17644fb82244SMatan Azrad event_type); 17654fb82244SMatan Azrad return -1; 17664fb82244SMatan Azrad } 17674fb82244SMatan Azrad } 17684fb82244SMatan Azrad 176992d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 1770edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 177192d2703eSMichael Qiu else if (need_check_link_status == 0) 1772ce8d5614SIntel printf("Please stop the ports first\n"); 1773ce8d5614SIntel 1774ce8d5614SIntel printf("Done\n"); 1775148f963fSBruce Richardson return 0; 1776ce8d5614SIntel } 1777ce8d5614SIntel 1778ce8d5614SIntel void 1779ce8d5614SIntel stop_port(portid_t pid) 1780ce8d5614SIntel { 1781ce8d5614SIntel portid_t pi; 1782ce8d5614SIntel struct rte_port *port; 1783ce8d5614SIntel int need_check_link_status = 0; 1784ce8d5614SIntel 1785ce8d5614SIntel if (dcb_test) { 1786ce8d5614SIntel dcb_test = 0; 1787ce8d5614SIntel dcb_config = 0; 1788ce8d5614SIntel } 17894468635fSMichael Qiu 17904468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 17914468635fSMichael Qiu return; 17924468635fSMichael Qiu 1793ce8d5614SIntel printf("Stopping ports...\n"); 1794ce8d5614SIntel 17957d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 17964468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1797ce8d5614SIntel continue; 1798ce8d5614SIntel 1799a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1800a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1801a8ef3e3aSBernard Iremonger continue; 1802a8ef3e3aSBernard Iremonger } 1803a8ef3e3aSBernard Iremonger 18040e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 18050e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 18060e545d30SBernard Iremonger continue; 18070e545d30SBernard Iremonger } 18080e545d30SBernard Iremonger 1809ce8d5614SIntel port = &ports[pi]; 1810ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 1811ce8d5614SIntel RTE_PORT_HANDLING) == 0) 1812ce8d5614SIntel continue; 1813ce8d5614SIntel 1814ce8d5614SIntel rte_eth_dev_stop(pi); 1815ce8d5614SIntel 1816ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1817ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1818ce8d5614SIntel printf("Port %d can not be set
into stopped\n", pi); 1819ce8d5614SIntel need_check_link_status = 1; 1820ce8d5614SIntel } 1821bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1822edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1823ce8d5614SIntel 1824ce8d5614SIntel printf("Done\n"); 1825ce8d5614SIntel } 1826ce8d5614SIntel 1827ce8d5614SIntel void 1828ce8d5614SIntel close_port(portid_t pid) 1829ce8d5614SIntel { 1830ce8d5614SIntel portid_t pi; 1831ce8d5614SIntel struct rte_port *port; 1832ce8d5614SIntel 18334468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 18344468635fSMichael Qiu return; 18354468635fSMichael Qiu 1836ce8d5614SIntel printf("Closing ports...\n"); 1837ce8d5614SIntel 18387d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 18394468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1840ce8d5614SIntel continue; 1841ce8d5614SIntel 1842a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1843a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1844a8ef3e3aSBernard Iremonger continue; 1845a8ef3e3aSBernard Iremonger } 1846a8ef3e3aSBernard Iremonger 18470e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 18480e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 18490e545d30SBernard Iremonger continue; 18500e545d30SBernard Iremonger } 18510e545d30SBernard Iremonger 1852ce8d5614SIntel port = &ports[pi]; 1853ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1854d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1855d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1856d4e8ad64SMichael Qiu continue; 1857d4e8ad64SMichael Qiu } 1858d4e8ad64SMichael Qiu 1859d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1860ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1861ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1862ce8d5614SIntel continue; 1863ce8d5614SIntel } 1864ce8d5614SIntel 1865938a184aSAdrien Mazarguil if (port->flow_list) 1866938a184aSAdrien Mazarguil port_flow_flush(pi); 1867ce8d5614SIntel rte_eth_dev_close(pi); 1868ce8d5614SIntel 1869ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1870ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1871b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1872ce8d5614SIntel } 1873ce8d5614SIntel 1874ce8d5614SIntel printf("Done\n"); 1875ce8d5614SIntel } 1876ce8d5614SIntel 1877edab33b1STetsuya Mukawa void 187897f1e196SWei Dai reset_port(portid_t pid) 187997f1e196SWei Dai { 188097f1e196SWei Dai int diag; 188197f1e196SWei Dai portid_t pi; 188297f1e196SWei Dai struct rte_port *port; 188397f1e196SWei Dai 188497f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 188597f1e196SWei Dai return; 188697f1e196SWei Dai 188797f1e196SWei Dai printf("Resetting ports...\n"); 188897f1e196SWei Dai 188997f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 189097f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 189197f1e196SWei Dai continue; 189297f1e196SWei Dai 189397f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 189497f1e196SWei Dai printf("Please remove port %d from forwarding " 189597f1e196SWei Dai "configuration.\n", pi); 189697f1e196SWei Dai continue; 189797f1e196SWei Dai } 189897f1e196SWei Dai 189997f1e196SWei Dai if (port_is_bonding_slave(pi)) { 190097f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 190197f1e196SWei Dai pi); 190297f1e196SWei Dai continue; 
190397f1e196SWei Dai } 190497f1e196SWei Dai 190597f1e196SWei Dai diag = rte_eth_dev_reset(pi); 190697f1e196SWei Dai if (diag == 0) { 190797f1e196SWei Dai port = &ports[pi]; 190897f1e196SWei Dai port->need_reconfig = 1; 190997f1e196SWei Dai port->need_reconfig_queues = 1; 191097f1e196SWei Dai } else { 191197f1e196SWei Dai printf("Failed to reset port %d. diag=%d\n", pi, diag); 191297f1e196SWei Dai } 191397f1e196SWei Dai } 191497f1e196SWei Dai 191597f1e196SWei Dai printf("Done\n"); 191697f1e196SWei Dai } 191797f1e196SWei Dai 1918fb73e096SJeff Guo static int 1919fb73e096SJeff Guo eth_dev_event_callback_register(void) 1920fb73e096SJeff Guo { 1921fb73e096SJeff Guo int ret; 1922fb73e096SJeff Guo 1923fb73e096SJeff Guo /* register the device event callback */ 1924fb73e096SJeff Guo ret = rte_dev_event_callback_register(NULL, 1925fb73e096SJeff Guo eth_dev_event_callback, NULL); 1926fb73e096SJeff Guo if (ret) { 1927fb73e096SJeff Guo printf("Failed to register device event callback\n"); 1928fb73e096SJeff Guo return -1; 1929fb73e096SJeff Guo } 1930fb73e096SJeff Guo 1931fb73e096SJeff Guo return 0; 1932fb73e096SJeff Guo } 1933fb73e096SJeff Guo 1934fb73e096SJeff Guo 1935fb73e096SJeff Guo static int 1936fb73e096SJeff Guo eth_dev_event_callback_unregister(void) 1937fb73e096SJeff Guo { 1938fb73e096SJeff Guo int ret; 1939fb73e096SJeff Guo 1940fb73e096SJeff Guo /* unregister the device event callback */ 1941fb73e096SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 1942fb73e096SJeff Guo eth_dev_event_callback, NULL); 1943fb73e096SJeff Guo if (ret < 0) { 1944fb73e096SJeff Guo printf("Failed to unregister device event callback\n"); 1945fb73e096SJeff Guo return -1; 1946fb73e096SJeff Guo } 1947fb73e096SJeff Guo 1948fb73e096SJeff Guo return 0; 1949fb73e096SJeff Guo } 1950fb73e096SJeff Guo 195197f1e196SWei Dai void 1952edab33b1STetsuya Mukawa attach_port(char *identifier) 1953ce8d5614SIntel { 1954ebf5e9b7SBernard Iremonger portid_t pi = 0; 1955931126baSBernard Iremonger unsigned int socket_id; 1956ce8d5614SIntel 1957edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1958edab33b1STetsuya Mukawa 1959edab33b1STetsuya Mukawa if (identifier == NULL) { 1960edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1961edab33b1STetsuya Mukawa return; 1962ce8d5614SIntel } 1963ce8d5614SIntel 1964edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1965edab33b1STetsuya Mukawa return; 1966edab33b1STetsuya Mukawa 1967931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1968931126baSBernard Iremonger /* if socket_id is invalid, set to 0 */ 1969931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 1970931126baSBernard Iremonger socket_id = 0; 1971931126baSBernard Iremonger reconfig(pi, socket_id); 1972edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1973edab33b1STetsuya Mukawa 1974d9a42a69SThomas Monjalon nb_ports = rte_eth_dev_count_avail(); 1975edab33b1STetsuya Mukawa 1976edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1977edab33b1STetsuya Mukawa 197803ce2c53SMatan Azrad update_fwd_ports(pi); 197903ce2c53SMatan Azrad 1980edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 1981edab33b1STetsuya Mukawa printf("Done\n"); 1982edab33b1STetsuya Mukawa } 1983edab33b1STetsuya Mukawa 1984edab33b1STetsuya Mukawa void 198528caa76aSZhiyong Yang detach_port(portid_t port_id) 19865f4ec54fSChen Jing D(Mark) { 1987edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 19885f4ec54fSChen Jing D(Mark) 1989edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 19905f4ec54fSChen Jing D(Mark) 1991edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1992edab33b1STetsuya Mukawa printf("Please close port first\n"); 1993edab33b1STetsuya Mukawa return; 1994edab33b1STetsuya Mukawa } 1995edab33b1STetsuya Mukawa 1996938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 1997938a184aSAdrien Mazarguil port_flow_flush(port_id); 1998938a184aSAdrien Mazarguil 19993070419eSGaetan Rivet if (rte_eth_dev_detach(port_id, name)) { 2000adea04c4SZhiyong Yang TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); 2001edab33b1STetsuya Mukawa return; 20023070419eSGaetan Rivet } 2003edab33b1STetsuya Mukawa 2004d9a42a69SThomas Monjalon nb_ports = rte_eth_dev_count_avail(); 2005edab33b1STetsuya Mukawa 200603ce2c53SMatan Azrad update_fwd_ports(RTE_MAX_ETHPORTS); 200703ce2c53SMatan Azrad 2008adea04c4SZhiyong Yang printf("Port %u is detached. Now total ports is %d\n", 2009adea04c4SZhiyong Yang port_id, nb_ports); 2010edab33b1STetsuya Mukawa printf("Done\n"); 2011edab33b1STetsuya Mukawa return; 20125f4ec54fSChen Jing D(Mark) } 20135f4ec54fSChen Jing D(Mark) 2014af75078fSIntel void 2015af75078fSIntel pmd_test_exit(void) 2016af75078fSIntel { 2017af75078fSIntel portid_t pt_id; 2018fb73e096SJeff Guo int ret; 2019af75078fSIntel 20208210ec25SPablo de Lara if (test_done == 0) 20218210ec25SPablo de Lara stop_packet_forwarding(); 20228210ec25SPablo de Lara 2023d3a274ceSZhihong Wang if (ports != NULL) { 2024d3a274ceSZhihong Wang no_link_check = 1; 20257d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 2026d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 2027af75078fSIntel fflush(stdout); 2028d3a274ceSZhihong Wang stop_port(pt_id); 2029d3a274ceSZhihong Wang close_port(pt_id); 2030af75078fSIntel } 2031d3a274ceSZhihong Wang } 2032fb73e096SJeff Guo 2033fb73e096SJeff Guo if (hot_plug) { 2034fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 2035fb73e096SJeff Guo if (ret) 2036fb73e096SJeff Guo RTE_LOG(ERR, EAL, 2037fb73e096SJeff Guo "fail to stop device event monitor."); 2038fb73e096SJeff Guo 2039fb73e096SJeff Guo ret = eth_dev_event_callback_unregister(); 2040fb73e096SJeff Guo if (ret) 2041fb73e096SJeff Guo RTE_LOG(ERR, EAL, 2042fb73e096SJeff Guo "fail to unregister all event callbacks."); 2043fb73e096SJeff Guo } 2044fb73e096SJeff Guo 2045d3a274ceSZhihong Wang printf("\nBye...\n"); 2046af75078fSIntel } 2047af75078fSIntel 2048af75078fSIntel typedef void (*cmd_func_t)(void); 2049af75078fSIntel struct pmd_test_command { 2050af75078fSIntel const char *cmd_name; 2051af75078fSIntel cmd_func_t cmd_func; 2052af75078fSIntel }; 2053af75078fSIntel 2054af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 2055af75078fSIntel 2056ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 2057af75078fSIntel static void 2058edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 2059af75078fSIntel { 2060ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 2061ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2062f8244c63SZhiyong Yang portid_t portid; 
2063f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 2064ce8d5614SIntel struct rte_eth_link link; 2065ce8d5614SIntel 2066ce8d5614SIntel printf("Checking link statuses...\n"); 2067ce8d5614SIntel fflush(stdout); 2068ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 2069ce8d5614SIntel all_ports_up = 1; 20707d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 2071ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 2072ce8d5614SIntel continue; 2073ce8d5614SIntel memset(&link, 0, sizeof(link)); 2074ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 2075ce8d5614SIntel /* print link status if flag set */ 2076ce8d5614SIntel if (print_flag == 1) { 2077ce8d5614SIntel if (link.link_status) 2078f8244c63SZhiyong Yang printf( 2079f8244c63SZhiyong Yang "Port%d Link Up. speed %u Mbps- %s\n", 2080f8244c63SZhiyong Yang portid, link.link_speed, 2081ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 2082ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 2083ce8d5614SIntel else 2084f8244c63SZhiyong Yang printf("Port %d Link Down\n", portid); 2085ce8d5614SIntel continue; 2086ce8d5614SIntel } 2087ce8d5614SIntel /* clear all_ports_up flag if any link down */ 208809419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 2089ce8d5614SIntel all_ports_up = 0; 2090ce8d5614SIntel break; 2091ce8d5614SIntel } 2092ce8d5614SIntel } 2093ce8d5614SIntel /* after finally printing all link status, get out */ 2094ce8d5614SIntel if (print_flag == 1) 2095ce8d5614SIntel break; 2096ce8d5614SIntel 2097ce8d5614SIntel if (all_ports_up == 0) { 2098ce8d5614SIntel fflush(stdout); 2099ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 2100ce8d5614SIntel } 2101ce8d5614SIntel 2102ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 2103ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2104ce8d5614SIntel print_flag = 1; 2105ce8d5614SIntel } 21068ea656f8SGaetan Rivet 21078ea656f8SGaetan Rivet if (lsc_interrupt) 21088ea656f8SGaetan Rivet break; 2109ce8d5614SIntel } 2110af75078fSIntel } 2111af75078fSIntel 2112284c908cSGaetan Rivet static void 2113284c908cSGaetan Rivet rmv_event_callback(void *arg) 2114284c908cSGaetan Rivet { 21153b97888aSMatan Azrad int need_to_start = 0; 21160da2a62bSMatan Azrad int org_no_link_check = no_link_check; 211728caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 2118284c908cSGaetan Rivet 2119284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 2120284c908cSGaetan Rivet 21213b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 21223b97888aSMatan Azrad need_to_start = 1; 21233b97888aSMatan Azrad stop_packet_forwarding(); 21243b97888aSMatan Azrad } 21250da2a62bSMatan Azrad no_link_check = 1; 2126284c908cSGaetan Rivet stop_port(port_id); 21270da2a62bSMatan Azrad no_link_check = org_no_link_check; 2128284c908cSGaetan Rivet close_port(port_id); 21293b97888aSMatan Azrad detach_port(port_id); 21303b97888aSMatan Azrad if (need_to_start) 21313b97888aSMatan Azrad start_packet_forwarding(0); 2132284c908cSGaetan Rivet } 2133284c908cSGaetan Rivet 213476ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 2135d6af1a13SBernard Iremonger static int 2136f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2137d6af1a13SBernard Iremonger void *ret_param) 213876ad4a2dSGaetan Rivet { 213976ad4a2dSGaetan Rivet static const char * const event_desc[] = { 214076ad4a2dSGaetan Rivet [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 214176ad4a2dSGaetan Rivet 
[RTE_ETH_EVENT_INTR_LSC] = "LSC", 214276ad4a2dSGaetan Rivet [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 214376ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 214476ad4a2dSGaetan Rivet [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2145badb87c1SAnoob Joseph [RTE_ETH_EVENT_IPSEC] = "IPsec", 214676ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MACSEC] = "MACsec", 214776ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 21484fb82244SMatan Azrad [RTE_ETH_EVENT_NEW] = "device probed", 21494fb82244SMatan Azrad [RTE_ETH_EVENT_DESTROY] = "device released", 215076ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 215176ad4a2dSGaetan Rivet }; 215276ad4a2dSGaetan Rivet 215376ad4a2dSGaetan Rivet RTE_SET_USED(param); 2154d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 215576ad4a2dSGaetan Rivet 215676ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 215776ad4a2dSGaetan Rivet fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 215876ad4a2dSGaetan Rivet port_id, __func__, type); 215976ad4a2dSGaetan Rivet fflush(stderr); 21603af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 216176ad4a2dSGaetan Rivet printf("\nPort %" PRIu8 ": %s event\n", port_id, 216276ad4a2dSGaetan Rivet event_desc[type]); 216376ad4a2dSGaetan Rivet fflush(stdout); 216476ad4a2dSGaetan Rivet } 2165284c908cSGaetan Rivet 21660e45c64dSMatan Azrad if (port_id_is_invalid(port_id, DISABLED_WARN)) 21670e45c64dSMatan Azrad return 0; 21680e45c64dSMatan Azrad 2169284c908cSGaetan Rivet switch (type) { 2170284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 2171284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 2172284c908cSGaetan Rivet rmv_event_callback, (void *)(intptr_t)port_id)) 2173284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 2174284c908cSGaetan Rivet break; 2175284c908cSGaetan Rivet default: 2176284c908cSGaetan Rivet break; 2177284c908cSGaetan Rivet } 2178d6af1a13SBernard Iremonger return 0; 217976ad4a2dSGaetan Rivet } 218076ad4a2dSGaetan Rivet 2181fb73e096SJeff Guo /* This function is used by the interrupt thread */ 2182fb73e096SJeff Guo static void 2183fb73e096SJeff Guo eth_dev_event_callback(char *device_name, enum rte_dev_event_type type, 2184fb73e096SJeff Guo __rte_unused void *arg) 2185fb73e096SJeff Guo { 2186fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 2187fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 2188fb73e096SJeff Guo __func__, type); 2189fb73e096SJeff Guo fflush(stderr); 2190fb73e096SJeff Guo } 2191fb73e096SJeff Guo 2192fb73e096SJeff Guo switch (type) { 2193fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 2194fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2195fb73e096SJeff Guo device_name); 2196fb73e096SJeff Guo /* TODO: After finish failure handle, begin to stop 2197fb73e096SJeff Guo * packet forward, stop port, close port, detach port. 2198fb73e096SJeff Guo */ 2199fb73e096SJeff Guo break; 2200fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 2201fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 2202fb73e096SJeff Guo device_name); 2203fb73e096SJeff Guo /* TODO: After finish kernel driver binding, 2204fb73e096SJeff Guo * begin to attach port. 
2205fb73e096SJeff Guo */ 2206fb73e096SJeff Guo break; 2207fb73e096SJeff Guo default: 2208fb73e096SJeff Guo break; 2209fb73e096SJeff Guo } 2210fb73e096SJeff Guo } 2211fb73e096SJeff Guo 2212013af9b6SIntel static int 221328caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2214af75078fSIntel { 2215013af9b6SIntel uint16_t i; 2216af75078fSIntel int diag; 2217013af9b6SIntel uint8_t mapping_found = 0; 2218af75078fSIntel 2219013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2220013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 2221013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2222013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2223013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 2224013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 2225013af9b6SIntel if (diag != 0) 2226013af9b6SIntel return diag; 2227013af9b6SIntel mapping_found = 1; 2228af75078fSIntel } 2229013af9b6SIntel } 2230013af9b6SIntel if (mapping_found) 2231013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 2232013af9b6SIntel return 0; 2233013af9b6SIntel } 2234013af9b6SIntel 2235013af9b6SIntel static int 223628caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2237013af9b6SIntel { 2238013af9b6SIntel uint16_t i; 2239013af9b6SIntel int diag; 2240013af9b6SIntel uint8_t mapping_found = 0; 2241013af9b6SIntel 2242013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2243013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 2244013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2245013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2246013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 2247013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 2248013af9b6SIntel if (diag != 0) 2249013af9b6SIntel return diag; 2250013af9b6SIntel mapping_found = 1; 2251013af9b6SIntel } 2252013af9b6SIntel } 2253013af9b6SIntel if (mapping_found) 2254013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 2255013af9b6SIntel return 0; 2256013af9b6SIntel } 2257013af9b6SIntel 2258013af9b6SIntel static void 225928caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2260013af9b6SIntel { 2261013af9b6SIntel int diag = 0; 2262013af9b6SIntel 2263013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 2264af75078fSIntel if (diag != 0) { 2265013af9b6SIntel if (diag == -ENOTSUP) { 2266013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 2267013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 2268013af9b6SIntel } 2269013af9b6SIntel else 2270013af9b6SIntel rte_exit(EXIT_FAILURE, 2271013af9b6SIntel "set_tx_queue_stats_mapping_registers " 2272013af9b6SIntel "failed for port id=%d diag=%d\n", 2273af75078fSIntel pi, diag); 2274af75078fSIntel } 2275013af9b6SIntel 2276013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 2277af75078fSIntel if (diag != 0) { 2278013af9b6SIntel if (diag == -ENOTSUP) { 2279013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 2280013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 2281013af9b6SIntel } 2282013af9b6SIntel else 2283013af9b6SIntel rte_exit(EXIT_FAILURE, 2284013af9b6SIntel "set_rx_queue_stats_mapping_registers " 2285013af9b6SIntel "failed for port id=%d diag=%d\n", 2286af75078fSIntel pi, diag); 2287af75078fSIntel } 2288af75078fSIntel } 2289af75078fSIntel 
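/*
 * The rxtx_port_config() helper that follows starts each queue from the
 * PMD defaults reported in dev_info (default_rxconf/default_txconf) and
 * overrides only the parameters (prefetch/host/write-back thresholds,
 * free/RS thresholds, drop enable) that were supplied on the command line,
 * i.e. those whose value is not RTE_PMD_PARAM_UNSET. A minimal sketch of
 * the same "copy defaults, then override selectively" pattern for one Rx
 * queue, where "port_id", "mb_pool" and the numeric values are only
 * illustrative:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_rxconf rxq_conf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	rxq_conf = dev_info.default_rxconf;
 *	rxq_conf.rx_free_thresh = 32;
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &rxq_conf, mb_pool);
 */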
2290f2c5125aSPablo de Lara static void 2291f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 2292f2c5125aSPablo de Lara { 2293d44f8a48SQi Zhang uint16_t qid; 2294f2c5125aSPablo de Lara 2295d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 2296d44f8a48SQi Zhang port->rx_conf[qid] = port->dev_info.default_rxconf; 2297d44f8a48SQi Zhang 2298d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 2299f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2300d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 2301f2c5125aSPablo de Lara 2302f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2303d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 2304f2c5125aSPablo de Lara 2305f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2306d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 2307f2c5125aSPablo de Lara 2308f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2309d44f8a48SQi Zhang port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 2310f2c5125aSPablo de Lara 2311f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2312d44f8a48SQi Zhang port->rx_conf[qid].rx_drop_en = rx_drop_en; 2313f2c5125aSPablo de Lara 2314d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 2315d44f8a48SQi Zhang } 2316d44f8a48SQi Zhang 2317d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 2318d44f8a48SQi Zhang port->tx_conf[qid] = port->dev_info.default_txconf; 2319d44f8a48SQi Zhang 2320d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 2321f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2322d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2323f2c5125aSPablo de Lara 2324f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2325d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2326f2c5125aSPablo de Lara 2327f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2328d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2329f2c5125aSPablo de Lara 2330f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2331d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2332f2c5125aSPablo de Lara 2333f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2334d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2335d44f8a48SQi Zhang 2336d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 2337d44f8a48SQi Zhang } 2338f2c5125aSPablo de Lara } 2339f2c5125aSPablo de Lara 2340013af9b6SIntel void 2341013af9b6SIntel init_port_config(void) 2342013af9b6SIntel { 2343013af9b6SIntel portid_t pid; 2344013af9b6SIntel struct rte_port *port; 234590892962SQi Zhang struct rte_eth_dev_info dev_info; 2346013af9b6SIntel 23477d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2348013af9b6SIntel port = &ports[pid]; 2349013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 23503ce690d3SBruce Richardson if (nb_rxq > 1) { 235190892962SQi Zhang rte_eth_dev_info_get(pid, &dev_info); 2352013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 235390892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 235490892962SQi Zhang rss_hf & dev_info.flow_type_rss_offloads; 2355af75078fSIntel } else { 2356013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2357013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2358af75078fSIntel } 23593ce690d3SBruce Richardson 23605f592039SJingjing Wu if (port->dcb_flag == 0) { 23613ce690d3SBruce Richardson if( 
port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 23623ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 23633ce690d3SBruce Richardson else 23643ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 23653ce690d3SBruce Richardson } 23663ce690d3SBruce Richardson 2367f2c5125aSPablo de Lara rxtx_port_config(port); 2368013af9b6SIntel 2369013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2370013af9b6SIntel 2371013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 237250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2373e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 23747b7e5ba7SIntel #endif 23758ea656f8SGaetan Rivet 23768ea656f8SGaetan Rivet if (lsc_interrupt && 23778ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 23788ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 23798ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2380284c908cSGaetan Rivet if (rmv_interrupt && 2381284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2382284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2383284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 23845b590fbeSJasvinder Singh 23855b590fbeSJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 23865b590fbeSJasvinder Singh /* Detect softnic port */ 23875b590fbeSJasvinder Singh if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 23885b590fbeSJasvinder Singh port->softnic_enable = 1; 23895b590fbeSJasvinder Singh memset(&port->softport, 0, sizeof(struct softnic_port)); 23905b590fbeSJasvinder Singh 23915b590fbeSJasvinder Singh if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 23925b590fbeSJasvinder Singh port->softport.tm_flag = 1; 23935b590fbeSJasvinder Singh } 23945b590fbeSJasvinder Singh #endif 2395013af9b6SIntel } 2396013af9b6SIntel } 2397013af9b6SIntel 239841b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 239941b05095SBernard Iremonger { 240041b05095SBernard Iremonger struct rte_port *port; 240141b05095SBernard Iremonger 240241b05095SBernard Iremonger port = &ports[slave_pid]; 240341b05095SBernard Iremonger port->slave_flag = 1; 240441b05095SBernard Iremonger } 240541b05095SBernard Iremonger 240641b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 240741b05095SBernard Iremonger { 240841b05095SBernard Iremonger struct rte_port *port; 240941b05095SBernard Iremonger 241041b05095SBernard Iremonger port = &ports[slave_pid]; 241141b05095SBernard Iremonger port->slave_flag = 0; 241241b05095SBernard Iremonger } 241341b05095SBernard Iremonger 24140e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 24150e545d30SBernard Iremonger { 24160e545d30SBernard Iremonger struct rte_port *port; 24170e545d30SBernard Iremonger 24180e545d30SBernard Iremonger port = &ports[slave_pid]; 2419b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 2420b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2421b8b8b344SMatan Azrad return 1; 2422b8b8b344SMatan Azrad return 0; 24230e545d30SBernard Iremonger } 24240e545d30SBernard Iremonger 2425013af9b6SIntel const uint16_t vlan_tags[] = { 2426013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2427013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2428013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2429013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 2430013af9b6SIntel }; 2431013af9b6SIntel 2432013af9b6SIntel static int 24331a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 
24341a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 24351a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 24361a572499SJingjing Wu uint8_t pfc_en) 2437013af9b6SIntel { 2438013af9b6SIntel uint8_t i; 2439af75078fSIntel 2440af75078fSIntel /* 2441013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2442013af9b6SIntel * given above, and the number of traffic classes available for use. 2443af75078fSIntel */ 24441a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 24451a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 24461a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 24471a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 24481a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2449013af9b6SIntel 2450547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 24511a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 24521a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 24531a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 24541a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 24551a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 24561a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2457013af9b6SIntel 24581a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 24591a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 24601a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 24611a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 24621a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2463af75078fSIntel } 2464013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2465f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2466f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2467013af9b6SIntel } 2468013af9b6SIntel 2469013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 247032e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 247132e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 24721a572499SJingjing Wu } else { 24731a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 24741a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 24751a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 24761a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2477013af9b6SIntel 24781a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 24791a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 24801a572499SJingjing Wu 2481bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2482bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2483bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2484013af9b6SIntel } 24851a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 24861a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 248732e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 24881a572499SJingjing Wu } 24891a572499SJingjing Wu 24901a572499SJingjing Wu if (pfc_en) 24911a572499SJingjing Wu eth_conf->dcb_capability_en = 24921a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2493013af9b6SIntel else 2494013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2495013af9b6SIntel 2496013af9b6SIntel return 0; 2497013af9b6SIntel } 2498013af9b6SIntel 2499013af9b6SIntel int 25001a572499SJingjing Wu init_port_dcb_config(portid_t pid, 25011a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 25021a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 25031a572499SJingjing Wu uint8_t pfc_en) 
2504013af9b6SIntel { 2505013af9b6SIntel struct rte_eth_conf port_conf; 2506013af9b6SIntel struct rte_port *rte_port; 2507013af9b6SIntel int retval; 2508013af9b6SIntel uint16_t i; 2509013af9b6SIntel 25102a977b89SWenzhuo Lu rte_port = &ports[pid]; 2511013af9b6SIntel 2512013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2513013af9b6SIntel /* Enter DCB configuration status */ 2514013af9b6SIntel dcb_config = 1; 2515013af9b6SIntel 2516d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 2517d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 2518d5354e89SYanglong Wu 2519013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 25201a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2521013af9b6SIntel if (retval < 0) 2522013af9b6SIntel return retval; 25230074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2524013af9b6SIntel 25252f203d44SQi Zhang /* re-configure the device . */ 25262f203d44SQi Zhang rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 25272a977b89SWenzhuo Lu 25282a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 25292a977b89SWenzhuo Lu 25302a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 25312a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 25322a977b89SWenzhuo Lu */ 25332a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 25342a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 25352a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 25362a977b89SWenzhuo Lu " for port %d.", pid); 25372a977b89SWenzhuo Lu return -1; 25382a977b89SWenzhuo Lu } 25392a977b89SWenzhuo Lu 25402a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 25412a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 25422a977b89SWenzhuo Lu */ 25432a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 254486ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 254586ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 254686ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 254786ef65eeSBernard Iremonger } else { 25482a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 25492a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 255086ef65eeSBernard Iremonger } 25512a977b89SWenzhuo Lu } else { 25522a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 25532a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 25542a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 25552a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 25562a977b89SWenzhuo Lu } else { 25572a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 25582a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 25592a977b89SWenzhuo Lu 25602a977b89SWenzhuo Lu } 25612a977b89SWenzhuo Lu } 25622a977b89SWenzhuo Lu rx_free_thresh = 64; 25632a977b89SWenzhuo Lu 2564013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2565013af9b6SIntel 2566f2c5125aSPablo de Lara rxtx_port_config(rte_port); 2567013af9b6SIntel /* VLAN filter */ 25680074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 25691a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 2570013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 2571013af9b6SIntel 2572013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2573013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 

static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	/* ANSI escape sequences: "ESC[2J" clears the screen,
	 * "ESC[1;1H" moves the cursor to the top-left corner.
	 */
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
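
/*
 * Termination path: on SIGINT/SIGTERM the handler above tears down the
 * optional capture and latency-stats frameworks, shuts the application down
 * via force_quit() (pmd_test_exit() and prompt_exit()), sets f_quit so that
 * the non-interactive statistics loop in main() stops, and finally re-raises
 * the signal with its default disposition so the process exits with the
 * expected status.
 */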

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count_avail();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the arguments consumed by rte_eal_init() and parse the
	 * remaining testpmd options.
	 */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
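
	/*
	 * Sanity-check option combinations before the ports are configured:
	 * --tx-first is rejected in interactive mode and requires link state
	 * change interrupts to be off (they are disabled with a warning);
	 * warnings are also printed when both RX and TX queue counts are zero
	 * or when the queue counts cannot fully exercise RSS.
	 */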
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
			"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
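
		/*
		 * Non-interactive mode: start forwarding right away and, if
		 * --stats-period was given, print the port statistics every
		 * stats_period seconds until a SIGINT/SIGTERM sets f_quit;
		 * otherwise simply wait for the user to press enter.
		 */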
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
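
/*
 * Illustrative invocations of the paths handled in main() above (option names
 * follow the testpmd/EAL command lines; cores, memory channels and ports must
 * be adapted to the target system):
 *
 *   # interactive session, forwarding started automatically
 *   ./testpmd -l 0-3 -n 4 -- -i --auto-start
 *
 *   # non-interactive run printing port statistics every 2 seconds
 *   ./testpmd -l 0-3 -n 4 -- --tx-first --stats-period 2
 */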