/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot be terminated from outside. Set this flag to exit the
 * stats-period loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				   enum rte_dev_event_type type,
				   void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket id is new.
 * Return a positive value if the socket has not been discovered yet,
 * zero if it is already recorded in socket_ids[].
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
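
/*
 * For instance (an illustrative sketch, not an extra testpmd code path):
 *
 *	unsigned int sock = rte_lcore_to_socket_id(rte_lcore_id());
 *	if (new_socket_id(sock))
 *		socket_ids[num_sockets++] = sock;
 *
 * which is essentially what set_default_fwd_lcores_config() below does for
 * every lcore while building the default forwarding configuration.
 */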
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
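
/*
 * Taken together, the three helpers above provide the defaults used by
 * set_def_fwd_config(): every enabled lcore except the master core is a
 * candidate forwarding core, each peer MAC address is a locally
 * administered address carrying the port index in its last byte, and all
 * probed ports are used for forwarding.
 */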
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid with NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
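
/*
 * The pool created by mbuf_pool_create() is named after its CPU socket by
 * mbuf_poolname_build(), so init_config() can later hand each forwarding
 * lcore the pool of its own socket through mbuf_pool_find().
 */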
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues of any port.
 * If valid, return 0, else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues of any port.
 * If valid, return 0, else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

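	/*
	 * Each forwarding lcore now points at the mbuf pool of its own CPU
	 * socket (falling back to the socket 0 pool when none exists), and
	 * the same pool serves as both the direct and indirect pool of the
	 * lcore's GSO context.
	 */
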
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

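	/*
	 * At this point every port has a socket id: taken from
	 * --port-numa-config when provided, otherwise from the device's own
	 * NUMA node, with socket 0 (or --socket-num in UMA mode) as the
	 * fallback.
	 */
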
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
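
/*
 * Stream bookkeeping: init_fwd_streams() sizes the stream array as
 * nb_ports * max(nb_rxq, nb_txq) and rebuilds it from scratch whenever that
 * product changes. The mapping of streams to forwarding lcores is handled
 * by fwd_config_setup(), which init_config() and start_packet_forwarding()
 * call after this function.
 */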

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. So a timer is
				 * added to exit this loop after the 1 sec
				 * timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
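
/*
 * launch_packet_forwarding() above starts the forwarding loop on each
 * configured forwarding lcore with rte_eal_remote_launch(); in interactive
 * mode the lcore running the command line is skipped so that the prompt
 * stays responsive.
 */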

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

1291af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1292af75078fSIntel memset(&fwd_streams[sm_id]->rx_burst_stats, 0, 1293af75078fSIntel sizeof(fwd_streams[sm_id]->rx_burst_stats)); 1294af75078fSIntel memset(&fwd_streams[sm_id]->tx_burst_stats, 0, 1295af75078fSIntel sizeof(fwd_streams[sm_id]->tx_burst_stats)); 1296af75078fSIntel #endif 1297af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1298af75078fSIntel fwd_streams[sm_id]->core_cycles = 0; 1299af75078fSIntel #endif 1300af75078fSIntel } 1301af75078fSIntel if (with_tx_first) { 1302af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1303af75078fSIntel if (port_fwd_begin != NULL) { 1304af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1305af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1306af75078fSIntel } 1307acbf77a6SZhihong Wang while (with_tx_first--) { 1308acbf77a6SZhihong Wang launch_packet_forwarding( 1309acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 1310af75078fSIntel rte_eal_mp_wait_lcore(); 1311acbf77a6SZhihong Wang } 1312af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1313af75078fSIntel if (port_fwd_end != NULL) { 1314af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1315af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1316af75078fSIntel } 1317af75078fSIntel } 1318af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1319af75078fSIntel } 1320af75078fSIntel 1321af75078fSIntel void 1322af75078fSIntel stop_packet_forwarding(void) 1323af75078fSIntel { 1324af75078fSIntel struct rte_eth_stats stats; 1325af75078fSIntel struct rte_port *port; 1326af75078fSIntel port_fwd_end_t port_fwd_end; 1327af75078fSIntel int i; 1328af75078fSIntel portid_t pt_id; 1329af75078fSIntel streamid_t sm_id; 1330af75078fSIntel lcoreid_t lc_id; 1331af75078fSIntel uint64_t total_recv; 1332af75078fSIntel uint64_t total_xmit; 1333af75078fSIntel uint64_t total_rx_dropped; 1334af75078fSIntel uint64_t total_tx_dropped; 1335af75078fSIntel uint64_t total_rx_nombuf; 1336af75078fSIntel uint64_t tx_dropped; 1337af75078fSIntel uint64_t rx_bad_ip_csum; 1338af75078fSIntel uint64_t rx_bad_l4_csum; 1339af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1340af75078fSIntel uint64_t fwd_cycles; 1341af75078fSIntel #endif 1342b7091f1dSJiayu Hu 1343af75078fSIntel static const char *acc_stats_border = "+++++++++++++++"; 1344af75078fSIntel 1345af75078fSIntel if (test_done) { 1346af75078fSIntel printf("Packet forwarding not started\n"); 1347af75078fSIntel return; 1348af75078fSIntel } 1349af75078fSIntel printf("Telling cores to stop..."); 1350af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1351af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1352af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1353af75078fSIntel rte_eal_mp_wait_lcore(); 1354af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1355af75078fSIntel if (port_fwd_end != NULL) { 1356af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1357af75078fSIntel pt_id = fwd_ports_ids[i]; 1358af75078fSIntel (*port_fwd_end)(pt_id); 1359af75078fSIntel } 1360af75078fSIntel } 1361af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1362af75078fSIntel fwd_cycles = 0; 1363af75078fSIntel #endif 1364af75078fSIntel for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 1365af75078fSIntel if (cur_fwd_config.nb_fwd_streams > 1366af75078fSIntel cur_fwd_config.nb_fwd_ports) { 1367af75078fSIntel fwd_stream_stats_display(sm_id); 1368af75078fSIntel 
ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; 1369af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; 1370af75078fSIntel } else { 1371af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_stream = 1372af75078fSIntel fwd_streams[sm_id]; 1373af75078fSIntel ports[fwd_streams[sm_id]->rx_port].rx_stream = 1374af75078fSIntel fwd_streams[sm_id]; 1375af75078fSIntel } 1376af75078fSIntel tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; 1377af75078fSIntel tx_dropped = (uint64_t) (tx_dropped + 1378af75078fSIntel fwd_streams[sm_id]->fwd_dropped); 1379af75078fSIntel ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; 1380af75078fSIntel 1381013af9b6SIntel rx_bad_ip_csum = 1382013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; 1383af75078fSIntel rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + 1384af75078fSIntel fwd_streams[sm_id]->rx_bad_ip_csum); 1385013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = 1386013af9b6SIntel rx_bad_ip_csum; 1387af75078fSIntel 1388013af9b6SIntel rx_bad_l4_csum = 1389013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; 1390af75078fSIntel rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + 1391af75078fSIntel fwd_streams[sm_id]->rx_bad_l4_csum); 1392013af9b6SIntel ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = 1393013af9b6SIntel rx_bad_l4_csum; 1394af75078fSIntel 1395af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1396af75078fSIntel fwd_cycles = (uint64_t) (fwd_cycles + 1397af75078fSIntel fwd_streams[sm_id]->core_cycles); 1398af75078fSIntel #endif 1399af75078fSIntel } 1400af75078fSIntel total_recv = 0; 1401af75078fSIntel total_xmit = 0; 1402af75078fSIntel total_rx_dropped = 0; 1403af75078fSIntel total_tx_dropped = 0; 1404af75078fSIntel total_rx_nombuf = 0; 14057741e4cfSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1406af75078fSIntel pt_id = fwd_ports_ids[i]; 1407af75078fSIntel 1408af75078fSIntel port = &ports[pt_id]; 1409af75078fSIntel rte_eth_stats_get(pt_id, &stats); 1410af75078fSIntel stats.ipackets -= port->stats.ipackets; 1411af75078fSIntel port->stats.ipackets = 0; 1412af75078fSIntel stats.opackets -= port->stats.opackets; 1413af75078fSIntel port->stats.opackets = 0; 1414af75078fSIntel stats.ibytes -= port->stats.ibytes; 1415af75078fSIntel port->stats.ibytes = 0; 1416af75078fSIntel stats.obytes -= port->stats.obytes; 1417af75078fSIntel port->stats.obytes = 0; 141870bdb186SIvan Boule stats.imissed -= port->stats.imissed; 141970bdb186SIvan Boule port->stats.imissed = 0; 1420af75078fSIntel stats.oerrors -= port->stats.oerrors; 1421af75078fSIntel port->stats.oerrors = 0; 1422af75078fSIntel stats.rx_nombuf -= port->stats.rx_nombuf; 1423af75078fSIntel port->stats.rx_nombuf = 0; 1424af75078fSIntel 1425af75078fSIntel total_recv += stats.ipackets; 1426af75078fSIntel total_xmit += stats.opackets; 142770bdb186SIvan Boule total_rx_dropped += stats.imissed; 1428af75078fSIntel total_tx_dropped += port->tx_dropped; 1429af75078fSIntel total_rx_nombuf += stats.rx_nombuf; 1430af75078fSIntel 1431af75078fSIntel fwd_port_stats_display(pt_id, &stats); 1432af75078fSIntel } 1433b7091f1dSJiayu Hu 1434af75078fSIntel printf("\n %s Accumulated forward statistics for all ports" 1435af75078fSIntel "%s\n", 1436af75078fSIntel acc_stats_border, acc_stats_border); 1437af75078fSIntel printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 1438af75078fSIntel "%-"PRIu64"\n" 1439af75078fSIntel " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 1440af75078fSIntel "%-"PRIu64"\n", 
1441af75078fSIntel total_recv, total_rx_dropped, total_recv + total_rx_dropped, 1442af75078fSIntel total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 1443af75078fSIntel if (total_rx_nombuf > 0) 1444af75078fSIntel printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 1445af75078fSIntel printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 1446af75078fSIntel "%s\n", 1447af75078fSIntel acc_stats_border, acc_stats_border); 1448af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 1449af75078fSIntel if (total_recv > 0) 1450af75078fSIntel printf("\n CPU cycles/packet=%u (total cycles=" 1451af75078fSIntel "%"PRIu64" / total RX packets=%"PRIu64")\n", 1452af75078fSIntel (unsigned int)(fwd_cycles / total_recv), 1453af75078fSIntel fwd_cycles, total_recv); 1454af75078fSIntel #endif 1455af75078fSIntel printf("\nDone.\n"); 1456af75078fSIntel test_done = 1; 1457af75078fSIntel } 1458af75078fSIntel 1459cfae07fdSOuyang Changchun void 1460cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1461cfae07fdSOuyang Changchun { 1462492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1463cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1464cfae07fdSOuyang Changchun } 1465cfae07fdSOuyang Changchun 1466cfae07fdSOuyang Changchun void 1467cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1468cfae07fdSOuyang Changchun { 1469492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1470cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1471cfae07fdSOuyang Changchun } 1472cfae07fdSOuyang Changchun 1473ce8d5614SIntel static int 1474ce8d5614SIntel all_ports_started(void) 1475ce8d5614SIntel { 1476ce8d5614SIntel portid_t pi; 1477ce8d5614SIntel struct rte_port *port; 1478ce8d5614SIntel 14797d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1480ce8d5614SIntel port = &ports[pi]; 1481ce8d5614SIntel /* Check if there is a port which is not started */ 148241b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 148341b05095SBernard Iremonger (port->slave_flag == 0)) 1484ce8d5614SIntel return 0; 1485ce8d5614SIntel } 1486ce8d5614SIntel 1487ce8d5614SIntel /* No port is not started */ 1488ce8d5614SIntel return 1; 1489ce8d5614SIntel } 1490ce8d5614SIntel 1491148f963fSBruce Richardson int 14926018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 14936018eb8cSShahaf Shuler { 14946018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 14956018eb8cSShahaf Shuler 14966018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 14976018eb8cSShahaf Shuler (port->slave_flag == 0)) 14986018eb8cSShahaf Shuler return 0; 14996018eb8cSShahaf Shuler return 1; 15006018eb8cSShahaf Shuler } 15016018eb8cSShahaf Shuler 15026018eb8cSShahaf Shuler int 1503edab33b1STetsuya Mukawa all_ports_stopped(void) 1504edab33b1STetsuya Mukawa { 1505edab33b1STetsuya Mukawa portid_t pi; 1506edab33b1STetsuya Mukawa 15077d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 15086018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 1509edab33b1STetsuya Mukawa return 0; 1510edab33b1STetsuya Mukawa } 1511edab33b1STetsuya Mukawa 1512edab33b1STetsuya Mukawa return 1; 1513edab33b1STetsuya Mukawa } 1514edab33b1STetsuya Mukawa 1515edab33b1STetsuya Mukawa int 1516edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 1517edab33b1STetsuya Mukawa { 1518edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1519edab33b1STetsuya Mukawa return 0; 1520edab33b1STetsuya Mukawa 1521edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 1522edab33b1STetsuya Mukawa return 
0; 1523edab33b1STetsuya Mukawa 1524edab33b1STetsuya Mukawa return 1; 1525edab33b1STetsuya Mukawa } 1526edab33b1STetsuya Mukawa 1527edab33b1STetsuya Mukawa static int 1528edab33b1STetsuya Mukawa port_is_closed(portid_t port_id) 1529edab33b1STetsuya Mukawa { 1530edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 1531edab33b1STetsuya Mukawa return 0; 1532edab33b1STetsuya Mukawa 1533edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_CLOSED) 1534edab33b1STetsuya Mukawa return 0; 1535edab33b1STetsuya Mukawa 1536edab33b1STetsuya Mukawa return 1; 1537edab33b1STetsuya Mukawa } 1538edab33b1STetsuya Mukawa 1539edab33b1STetsuya Mukawa int 1540ce8d5614SIntel start_port(portid_t pid) 1541ce8d5614SIntel { 154292d2703eSMichael Qiu int diag, need_check_link_status = -1; 1543ce8d5614SIntel portid_t pi; 1544ce8d5614SIntel queueid_t qi; 1545ce8d5614SIntel struct rte_port *port; 15462950a769SDeclan Doherty struct ether_addr mac_addr; 154776ad4a2dSGaetan Rivet enum rte_eth_event_type event_type; 1548ce8d5614SIntel 15494468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 15504468635fSMichael Qiu return 0; 15514468635fSMichael Qiu 1552ce8d5614SIntel if(dcb_config) 1553ce8d5614SIntel dcb_test = 1; 15547d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1555edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1556ce8d5614SIntel continue; 1557ce8d5614SIntel 155892d2703eSMichael Qiu need_check_link_status = 0; 1559ce8d5614SIntel port = &ports[pi]; 1560ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 1561ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 1562ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1563ce8d5614SIntel continue; 1564ce8d5614SIntel } 1565ce8d5614SIntel 1566ce8d5614SIntel if (port->need_reconfig > 0) { 1567ce8d5614SIntel port->need_reconfig = 0; 1568ce8d5614SIntel 15697ee3e944SVasily Philipov if (flow_isolate_all) { 15707ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 15717ee3e944SVasily Philipov if (ret) { 15727ee3e944SVasily Philipov printf("Failed to apply isolated" 15737ee3e944SVasily Philipov " mode on port %d\n", pi); 15747ee3e944SVasily Philipov return -1; 15757ee3e944SVasily Philipov } 15767ee3e944SVasily Philipov } 15777ee3e944SVasily Philipov 15785706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 157920a0286fSLiu Xiaofeng port->socket_id); 1580ce8d5614SIntel /* configure port */ 1581ce8d5614SIntel diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, 1582ce8d5614SIntel &(port->dev_conf)); 1583ce8d5614SIntel if (diag != 0) { 1584ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1585ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1586ce8d5614SIntel printf("Port %d can not be set back " 1587ce8d5614SIntel "to stopped\n", pi); 1588ce8d5614SIntel printf("Fail to configure port %d\n", pi); 1589ce8d5614SIntel /* try to reconfigure port next time */ 1590ce8d5614SIntel port->need_reconfig = 1; 1591148f963fSBruce Richardson return -1; 1592ce8d5614SIntel } 1593ce8d5614SIntel } 1594ce8d5614SIntel if (port->need_reconfig_queues > 0) { 1595ce8d5614SIntel port->need_reconfig_queues = 0; 1596597f9fafSShahaf Shuler port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; 1597597f9fafSShahaf Shuler /* Apply Tx offloads configuration */ 1598597f9fafSShahaf Shuler port->tx_conf.offloads = port->dev_conf.txmode.offloads; 1599ce8d5614SIntel /* setup tx queues */ 1600ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 1601b6ea6408SIntel if ((numa_support) && 1602b6ea6408SIntel 
(txring_numa[pi] != NUMA_NO_CONFIG)) 1603b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1604b6ea6408SIntel nb_txd,txring_numa[pi], 1605b6ea6408SIntel &(port->tx_conf)); 1606b6ea6408SIntel else 1607b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 1608b6ea6408SIntel nb_txd,port->socket_id, 1609b6ea6408SIntel &(port->tx_conf)); 1610b6ea6408SIntel 1611ce8d5614SIntel if (diag == 0) 1612ce8d5614SIntel continue; 1613ce8d5614SIntel 1614ce8d5614SIntel /* Fail to setup tx queue, return */ 1615ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1616ce8d5614SIntel RTE_PORT_HANDLING, 1617ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1618ce8d5614SIntel printf("Port %d can not be set back " 1619ce8d5614SIntel "to stopped\n", pi); 1620ce8d5614SIntel printf("Fail to configure port %d tx queues\n", pi); 1621ce8d5614SIntel /* try to reconfigure queues next time */ 1622ce8d5614SIntel port->need_reconfig_queues = 1; 1623148f963fSBruce Richardson return -1; 1624ce8d5614SIntel } 16250074d02fSShahaf Shuler /* Apply Rx offloads configuration */ 16260074d02fSShahaf Shuler port->rx_conf.offloads = port->dev_conf.rxmode.offloads; 1627ce8d5614SIntel /* setup rx queues */ 1628ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 1629b6ea6408SIntel if ((numa_support) && 1630b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 1631b6ea6408SIntel struct rte_mempool * mp = 1632b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 1633b6ea6408SIntel if (mp == NULL) { 1634b6ea6408SIntel printf("Failed to setup RX queue:" 1635b6ea6408SIntel "No mempool allocation" 1636b6ea6408SIntel " on the socket %d\n", 1637b6ea6408SIntel rxring_numa[pi]); 1638148f963fSBruce Richardson return -1; 1639b6ea6408SIntel } 1640b6ea6408SIntel 1641b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1642b6ea6408SIntel nb_rxd,rxring_numa[pi], 1643b6ea6408SIntel &(port->rx_conf),mp); 16441e1d6bddSBernard Iremonger } else { 16451e1d6bddSBernard Iremonger struct rte_mempool *mp = 16461e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 16471e1d6bddSBernard Iremonger if (mp == NULL) { 16481e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 16491e1d6bddSBernard Iremonger "No mempool allocation" 16501e1d6bddSBernard Iremonger " on the socket %d\n", 16511e1d6bddSBernard Iremonger port->socket_id); 16521e1d6bddSBernard Iremonger return -1; 1653b6ea6408SIntel } 1654b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 1655b6ea6408SIntel nb_rxd,port->socket_id, 16561e1d6bddSBernard Iremonger &(port->rx_conf), mp); 16571e1d6bddSBernard Iremonger } 1658ce8d5614SIntel if (diag == 0) 1659ce8d5614SIntel continue; 1660ce8d5614SIntel 1661ce8d5614SIntel /* Fail to setup rx queue, return */ 1662ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1663ce8d5614SIntel RTE_PORT_HANDLING, 1664ce8d5614SIntel RTE_PORT_STOPPED) == 0) 1665ce8d5614SIntel printf("Port %d can not be set back " 1666ce8d5614SIntel "to stopped\n", pi); 1667ce8d5614SIntel printf("Fail to configure port %d rx queues\n", pi); 1668ce8d5614SIntel /* try to reconfigure queues next time */ 1669ce8d5614SIntel port->need_reconfig_queues = 1; 1670148f963fSBruce Richardson return -1; 1671ce8d5614SIntel } 1672ce8d5614SIntel } 167376ad4a2dSGaetan Rivet 1674ce8d5614SIntel /* start port */ 1675ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 1676ce8d5614SIntel printf("Fail to start port %d\n", pi); 1677ce8d5614SIntel 1678ce8d5614SIntel /* Fail to setup rx queue, return */ 1679ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1680ce8d5614SIntel RTE_PORT_HANDLING, 
RTE_PORT_STOPPED) == 0)
1681ce8d5614SIntel 				printf("Port %d cannot be set back to "
1682ce8d5614SIntel 					"stopped\n", pi);
1683ce8d5614SIntel 			continue;
1684ce8d5614SIntel 		}
1685ce8d5614SIntel 
1686ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1687ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1688ce8d5614SIntel 			printf("Port %d cannot be set to started\n", pi);
1689ce8d5614SIntel 
16902950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
1691d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
16922950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
16932950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
16942950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1695d8c89163SZijie Pan 
1696ce8d5614SIntel 		/* at least one port started, need checking link status */
1697ce8d5614SIntel 		need_check_link_status = 1;
1698ce8d5614SIntel 	}
1699ce8d5614SIntel 
17004fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
17014fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
17024fb82244SMatan Azrad 	     event_type++) {
17034fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
17044fb82244SMatan Azrad 						event_type,
17054fb82244SMatan Azrad 						eth_event_callback,
17064fb82244SMatan Azrad 						NULL);
17074fb82244SMatan Azrad 		if (diag) {
17084fb82244SMatan Azrad 			printf("Failed to set up event callback for event %d\n",
17094fb82244SMatan Azrad 				event_type);
17104fb82244SMatan Azrad 			return -1;
17114fb82244SMatan Azrad 		}
17124fb82244SMatan Azrad 	}
17134fb82244SMatan Azrad 
171492d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
1715edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
171692d2703eSMichael Qiu 	else if (need_check_link_status == 0)
1717ce8d5614SIntel 		printf("Please stop the ports first\n");
1718ce8d5614SIntel 
1719ce8d5614SIntel 	printf("Done\n");
1720148f963fSBruce Richardson 	return 0;
1721ce8d5614SIntel }
1722ce8d5614SIntel 
1723ce8d5614SIntel void
1724ce8d5614SIntel stop_port(portid_t pid)
1725ce8d5614SIntel {
1726ce8d5614SIntel 	portid_t pi;
1727ce8d5614SIntel 	struct rte_port *port;
1728ce8d5614SIntel 	int need_check_link_status = 0;
1729ce8d5614SIntel 
1730ce8d5614SIntel 	if (dcb_test) {
1731ce8d5614SIntel 		dcb_test = 0;
1732ce8d5614SIntel 		dcb_config = 0;
1733ce8d5614SIntel 	}
17344468635fSMichael Qiu 
17354468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
17364468635fSMichael Qiu 		return;
17374468635fSMichael Qiu 
1738ce8d5614SIntel 	printf("Stopping ports...\n");
1739ce8d5614SIntel 
17407d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
17414468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1742ce8d5614SIntel 			continue;
1743ce8d5614SIntel 
1744a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1745a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
1746a8ef3e3aSBernard Iremonger 			continue;
1747a8ef3e3aSBernard Iremonger 		}
1748a8ef3e3aSBernard Iremonger 
17490e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
17500e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
17510e545d30SBernard Iremonger 			continue;
17520e545d30SBernard Iremonger 		}
17530e545d30SBernard Iremonger 
1754ce8d5614SIntel 		port = &ports[pi];
1755ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1756ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1757ce8d5614SIntel 			continue;
1758ce8d5614SIntel 
1759ce8d5614SIntel 		rte_eth_dev_stop(pi);
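		/*
		 * The stop call is not error-checked in this code path; the port
		 * state is moved back to RTE_PORT_STOPPED below and the link
		 * status of all ports is re-checked once after the loop.
		 */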
1760ce8d5614SIntel 1761ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1762ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 1763ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 1764ce8d5614SIntel need_check_link_status = 1; 1765ce8d5614SIntel } 1766bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 1767edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 1768ce8d5614SIntel 1769ce8d5614SIntel printf("Done\n"); 1770ce8d5614SIntel } 1771ce8d5614SIntel 1772ce8d5614SIntel void 1773ce8d5614SIntel close_port(portid_t pid) 1774ce8d5614SIntel { 1775ce8d5614SIntel portid_t pi; 1776ce8d5614SIntel struct rte_port *port; 1777ce8d5614SIntel 17784468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 17794468635fSMichael Qiu return; 17804468635fSMichael Qiu 1781ce8d5614SIntel printf("Closing ports...\n"); 1782ce8d5614SIntel 17837d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 17844468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 1785ce8d5614SIntel continue; 1786ce8d5614SIntel 1787a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 1788a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 1789a8ef3e3aSBernard Iremonger continue; 1790a8ef3e3aSBernard Iremonger } 1791a8ef3e3aSBernard Iremonger 17920e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 17930e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 17940e545d30SBernard Iremonger continue; 17950e545d30SBernard Iremonger } 17960e545d30SBernard Iremonger 1797ce8d5614SIntel port = &ports[pi]; 1798ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1799d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 1800d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 1801d4e8ad64SMichael Qiu continue; 1802d4e8ad64SMichael Qiu } 1803d4e8ad64SMichael Qiu 1804d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 1805ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 1806ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 1807ce8d5614SIntel continue; 1808ce8d5614SIntel } 1809ce8d5614SIntel 1810938a184aSAdrien Mazarguil if (port->flow_list) 1811938a184aSAdrien Mazarguil port_flow_flush(pi); 1812ce8d5614SIntel rte_eth_dev_close(pi); 1813ce8d5614SIntel 1814ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 1815ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 1816b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 1817ce8d5614SIntel } 1818ce8d5614SIntel 1819ce8d5614SIntel printf("Done\n"); 1820ce8d5614SIntel } 1821ce8d5614SIntel 1822edab33b1STetsuya Mukawa void 182397f1e196SWei Dai reset_port(portid_t pid) 182497f1e196SWei Dai { 182597f1e196SWei Dai int diag; 182697f1e196SWei Dai portid_t pi; 182797f1e196SWei Dai struct rte_port *port; 182897f1e196SWei Dai 182997f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 183097f1e196SWei Dai return; 183197f1e196SWei Dai 183297f1e196SWei Dai printf("Resetting ports...\n"); 183397f1e196SWei Dai 183497f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 183597f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 183697f1e196SWei Dai continue; 183797f1e196SWei Dai 183897f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 183997f1e196SWei Dai printf("Please remove port %d from forwarding " 184097f1e196SWei Dai "configuration.\n", pi); 184197f1e196SWei Dai continue; 184297f1e196SWei Dai } 184397f1e196SWei Dai 
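		/*
		 * Like stop/close, reset skips ports that are still part of the
		 * forwarding configuration or enslaved to a bonded device. For the
		 * remaining ports a successful rte_eth_dev_reset() below marks the
		 * port for full reconfiguration on the next start_port().
		 */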
184497f1e196SWei Dai if (port_is_bonding_slave(pi)) { 184597f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 184697f1e196SWei Dai pi); 184797f1e196SWei Dai continue; 184897f1e196SWei Dai } 184997f1e196SWei Dai 185097f1e196SWei Dai diag = rte_eth_dev_reset(pi); 185197f1e196SWei Dai if (diag == 0) { 185297f1e196SWei Dai port = &ports[pi]; 185397f1e196SWei Dai port->need_reconfig = 1; 185497f1e196SWei Dai port->need_reconfig_queues = 1; 185597f1e196SWei Dai } else { 185697f1e196SWei Dai printf("Failed to reset port %d. diag=%d\n", pi, diag); 185797f1e196SWei Dai } 185897f1e196SWei Dai } 185997f1e196SWei Dai 186097f1e196SWei Dai printf("Done\n"); 186197f1e196SWei Dai } 186297f1e196SWei Dai 1863fb73e096SJeff Guo static int 1864fb73e096SJeff Guo eth_dev_event_callback_register(void) 1865fb73e096SJeff Guo { 1866fb73e096SJeff Guo int ret; 1867fb73e096SJeff Guo 1868fb73e096SJeff Guo /* register the device event callback */ 1869fb73e096SJeff Guo ret = rte_dev_event_callback_register(NULL, 1870fb73e096SJeff Guo eth_dev_event_callback, NULL); 1871fb73e096SJeff Guo if (ret) { 1872fb73e096SJeff Guo printf("Failed to register device event callback\n"); 1873fb73e096SJeff Guo return -1; 1874fb73e096SJeff Guo } 1875fb73e096SJeff Guo 1876fb73e096SJeff Guo return 0; 1877fb73e096SJeff Guo } 1878fb73e096SJeff Guo 1879fb73e096SJeff Guo 1880fb73e096SJeff Guo static int 1881fb73e096SJeff Guo eth_dev_event_callback_unregister(void) 1882fb73e096SJeff Guo { 1883fb73e096SJeff Guo int ret; 1884fb73e096SJeff Guo 1885fb73e096SJeff Guo /* unregister the device event callback */ 1886fb73e096SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 1887fb73e096SJeff Guo eth_dev_event_callback, NULL); 1888fb73e096SJeff Guo if (ret < 0) { 1889fb73e096SJeff Guo printf("Failed to unregister device event callback\n"); 1890fb73e096SJeff Guo return -1; 1891fb73e096SJeff Guo } 1892fb73e096SJeff Guo 1893fb73e096SJeff Guo return 0; 1894fb73e096SJeff Guo } 1895fb73e096SJeff Guo 189697f1e196SWei Dai void 1897edab33b1STetsuya Mukawa attach_port(char *identifier) 1898ce8d5614SIntel { 1899ebf5e9b7SBernard Iremonger portid_t pi = 0; 1900931126baSBernard Iremonger unsigned int socket_id; 1901ce8d5614SIntel 1902edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 1903edab33b1STetsuya Mukawa 1904edab33b1STetsuya Mukawa if (identifier == NULL) { 1905edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 1906edab33b1STetsuya Mukawa return; 1907ce8d5614SIntel } 1908ce8d5614SIntel 1909edab33b1STetsuya Mukawa if (rte_eth_dev_attach(identifier, &pi)) 1910edab33b1STetsuya Mukawa return; 1911edab33b1STetsuya Mukawa 1912931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 1913931126baSBernard Iremonger /* if socket_id is invalid, set to 0 */ 1914931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 1915931126baSBernard Iremonger socket_id = 0; 1916931126baSBernard Iremonger reconfig(pi, socket_id); 1917edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 1918edab33b1STetsuya Mukawa 1919edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1920edab33b1STetsuya Mukawa 1921edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 1922edab33b1STetsuya Mukawa 1923edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 1924edab33b1STetsuya Mukawa printf("Done\n"); 1925edab33b1STetsuya Mukawa } 1926edab33b1STetsuya Mukawa 1927edab33b1STetsuya Mukawa void 192828caa76aSZhiyong Yang detach_port(portid_t port_id) 19295f4ec54fSChen Jing D(Mark) { 1930edab33b1STetsuya Mukawa char name[RTE_ETH_NAME_MAX_LEN]; 19315f4ec54fSChen Jing D(Mark) 1932edab33b1STetsuya Mukawa printf("Detaching a port...\n"); 19335f4ec54fSChen Jing D(Mark) 1934edab33b1STetsuya Mukawa if (!port_is_closed(port_id)) { 1935edab33b1STetsuya Mukawa printf("Please close port first\n"); 1936edab33b1STetsuya Mukawa return; 1937edab33b1STetsuya Mukawa } 1938edab33b1STetsuya Mukawa 1939938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 1940938a184aSAdrien Mazarguil port_flow_flush(port_id); 1941938a184aSAdrien Mazarguil 19423070419eSGaetan Rivet if (rte_eth_dev_detach(port_id, name)) { 1943285fd101SOlivier Matz TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); 1944edab33b1STetsuya Mukawa return; 19453070419eSGaetan Rivet } 1946edab33b1STetsuya Mukawa 1947edab33b1STetsuya Mukawa nb_ports = rte_eth_dev_count(); 1948edab33b1STetsuya Mukawa 1949edab33b1STetsuya Mukawa printf("Port '%s' is detached. Now total ports is %d\n", 1950edab33b1STetsuya Mukawa name, nb_ports); 1951edab33b1STetsuya Mukawa printf("Done\n"); 1952edab33b1STetsuya Mukawa return; 19535f4ec54fSChen Jing D(Mark) } 19545f4ec54fSChen Jing D(Mark) 1955af75078fSIntel void 1956af75078fSIntel pmd_test_exit(void) 1957af75078fSIntel { 1958af75078fSIntel portid_t pt_id; 1959fb73e096SJeff Guo int ret; 1960af75078fSIntel 19618210ec25SPablo de Lara if (test_done == 0) 19628210ec25SPablo de Lara stop_packet_forwarding(); 19638210ec25SPablo de Lara 1964d3a274ceSZhihong Wang if (ports != NULL) { 1965d3a274ceSZhihong Wang no_link_check = 1; 19667d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 1967d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 1968af75078fSIntel fflush(stdout); 1969d3a274ceSZhihong Wang stop_port(pt_id); 1970d3a274ceSZhihong Wang close_port(pt_id); 1971af75078fSIntel } 1972d3a274ceSZhihong Wang } 1973fb73e096SJeff Guo 1974fb73e096SJeff Guo if (hot_plug) { 1975fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 1976fb73e096SJeff Guo if (ret) 1977fb73e096SJeff Guo RTE_LOG(ERR, EAL, 1978fb73e096SJeff Guo "fail to stop device event monitor."); 1979fb73e096SJeff Guo 1980fb73e096SJeff Guo ret = eth_dev_event_callback_unregister(); 1981fb73e096SJeff Guo if (ret) 1982fb73e096SJeff Guo RTE_LOG(ERR, EAL, 1983fb73e096SJeff Guo "fail to unregister all event callbacks."); 1984fb73e096SJeff Guo } 1985fb73e096SJeff Guo 1986d3a274ceSZhihong Wang printf("\nBye...\n"); 1987af75078fSIntel } 1988af75078fSIntel 1989af75078fSIntel typedef void (*cmd_func_t)(void); 1990af75078fSIntel struct pmd_test_command { 1991af75078fSIntel const char *cmd_name; 1992af75078fSIntel cmd_func_t cmd_func; 1993af75078fSIntel }; 1994af75078fSIntel 1995af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 1996af75078fSIntel 1997ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 1998af75078fSIntel static void 1999edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 2000af75078fSIntel { 2001ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 2002ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2003f8244c63SZhiyong Yang portid_t portid; 2004f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 2005ce8d5614SIntel struct 
rte_eth_link link;
2006ce8d5614SIntel 
2007ce8d5614SIntel 	printf("Checking link statuses...\n");
2008ce8d5614SIntel 	fflush(stdout);
2009ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2010ce8d5614SIntel 		all_ports_up = 1;
20117d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2012ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2013ce8d5614SIntel 				continue;
2014ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2015ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2016ce8d5614SIntel 			/* print link status if flag set */
2017ce8d5614SIntel 			if (print_flag == 1) {
2018ce8d5614SIntel 				if (link.link_status)
2019f8244c63SZhiyong Yang 					printf(
2020f8244c63SZhiyong Yang 					"Port %d Link Up. speed %u Mbps - %s\n",
2021f8244c63SZhiyong Yang 					portid, link.link_speed,
2022ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2023ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
2024ce8d5614SIntel 				else
2025f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2026ce8d5614SIntel 				continue;
2027ce8d5614SIntel 			}
2028ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
202909419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2030ce8d5614SIntel 				all_ports_up = 0;
2031ce8d5614SIntel 				break;
2032ce8d5614SIntel 			}
2033ce8d5614SIntel 		}
2034ce8d5614SIntel 		/* after finally printing all link status, get out */
2035ce8d5614SIntel 		if (print_flag == 1)
2036ce8d5614SIntel 			break;
2037ce8d5614SIntel 
2038ce8d5614SIntel 		if (all_ports_up == 0) {
2039ce8d5614SIntel 			fflush(stdout);
2040ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2041ce8d5614SIntel 		}
2042ce8d5614SIntel 
2043ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2044ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2045ce8d5614SIntel 			print_flag = 1;
2046ce8d5614SIntel 		}
20478ea656f8SGaetan Rivet 
20488ea656f8SGaetan Rivet 		if (lsc_interrupt)
20498ea656f8SGaetan Rivet 			break;
2050ce8d5614SIntel 	}
2051af75078fSIntel }
2052af75078fSIntel 
2053284c908cSGaetan Rivet static void
2054284c908cSGaetan Rivet rmv_event_callback(void *arg)
2055284c908cSGaetan Rivet {
2056284c908cSGaetan Rivet 	struct rte_eth_dev *dev;
205728caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2058284c908cSGaetan Rivet 
2059284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2060284c908cSGaetan Rivet 	dev = &rte_eth_devices[port_id];
2061284c908cSGaetan Rivet 
2062284c908cSGaetan Rivet 	stop_port(port_id);
2063284c908cSGaetan Rivet 	close_port(port_id);
2064f3a1188cSGaetan Rivet 	printf("removing device %s\n", dev->device->name);
20653070419eSGaetan Rivet 	if (rte_eal_dev_detach(dev->device))
2066285fd101SOlivier Matz 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
20673070419eSGaetan Rivet 			dev->device->name);
2068284c908cSGaetan Rivet }
2069284c908cSGaetan Rivet 
207076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2071d6af1a13SBernard Iremonger static int
2072f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2073d6af1a13SBernard Iremonger 		  void *ret_param)
207476ad4a2dSGaetan Rivet {
207576ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
207676ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
207776ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
207876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
207976ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
208076ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
208176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
208276ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 20834fb82244SMatan Azrad [RTE_ETH_EVENT_NEW] = "device probed", 20844fb82244SMatan Azrad [RTE_ETH_EVENT_DESTROY] = "device released", 208576ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 208676ad4a2dSGaetan Rivet }; 208776ad4a2dSGaetan Rivet 208876ad4a2dSGaetan Rivet RTE_SET_USED(param); 2089d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 209076ad4a2dSGaetan Rivet 209176ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 209276ad4a2dSGaetan Rivet fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", 209376ad4a2dSGaetan Rivet port_id, __func__, type); 209476ad4a2dSGaetan Rivet fflush(stderr); 20953af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 209676ad4a2dSGaetan Rivet printf("\nPort %" PRIu8 ": %s event\n", port_id, 209776ad4a2dSGaetan Rivet event_desc[type]); 209876ad4a2dSGaetan Rivet fflush(stdout); 209976ad4a2dSGaetan Rivet } 2100284c908cSGaetan Rivet 21010e45c64dSMatan Azrad if (port_id_is_invalid(port_id, DISABLED_WARN)) 21020e45c64dSMatan Azrad return 0; 21030e45c64dSMatan Azrad 2104284c908cSGaetan Rivet switch (type) { 2105284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 2106284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 2107284c908cSGaetan Rivet rmv_event_callback, (void *)(intptr_t)port_id)) 2108284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 2109284c908cSGaetan Rivet break; 2110284c908cSGaetan Rivet default: 2111284c908cSGaetan Rivet break; 2112284c908cSGaetan Rivet } 2113d6af1a13SBernard Iremonger return 0; 211476ad4a2dSGaetan Rivet } 211576ad4a2dSGaetan Rivet 2116fb73e096SJeff Guo /* This function is used by the interrupt thread */ 2117fb73e096SJeff Guo static void 2118fb73e096SJeff Guo eth_dev_event_callback(char *device_name, enum rte_dev_event_type type, 2119fb73e096SJeff Guo __rte_unused void *arg) 2120fb73e096SJeff Guo { 2121fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 2122fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 2123fb73e096SJeff Guo __func__, type); 2124fb73e096SJeff Guo fflush(stderr); 2125fb73e096SJeff Guo } 2126fb73e096SJeff Guo 2127fb73e096SJeff Guo switch (type) { 2128fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 2129fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2130fb73e096SJeff Guo device_name); 2131fb73e096SJeff Guo /* TODO: After finish failure handle, begin to stop 2132fb73e096SJeff Guo * packet forward, stop port, close port, detach port. 2133fb73e096SJeff Guo */ 2134fb73e096SJeff Guo break; 2135fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 2136fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 2137fb73e096SJeff Guo device_name); 2138fb73e096SJeff Guo /* TODO: After finish kernel driver binding, 2139fb73e096SJeff Guo * begin to attach port. 
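	 * (At this point the hot-plug callback only logs the add/remove
	 * event; automatic detach/attach handling is not implemented yet.)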
2140fb73e096SJeff Guo */ 2141fb73e096SJeff Guo break; 2142fb73e096SJeff Guo default: 2143fb73e096SJeff Guo break; 2144fb73e096SJeff Guo } 2145fb73e096SJeff Guo } 2146fb73e096SJeff Guo 2147013af9b6SIntel static int 214828caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2149af75078fSIntel { 2150013af9b6SIntel uint16_t i; 2151af75078fSIntel int diag; 2152013af9b6SIntel uint8_t mapping_found = 0; 2153af75078fSIntel 2154013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2155013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 2156013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2157013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2158013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 2159013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 2160013af9b6SIntel if (diag != 0) 2161013af9b6SIntel return diag; 2162013af9b6SIntel mapping_found = 1; 2163af75078fSIntel } 2164013af9b6SIntel } 2165013af9b6SIntel if (mapping_found) 2166013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 2167013af9b6SIntel return 0; 2168013af9b6SIntel } 2169013af9b6SIntel 2170013af9b6SIntel static int 217128caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2172013af9b6SIntel { 2173013af9b6SIntel uint16_t i; 2174013af9b6SIntel int diag; 2175013af9b6SIntel uint8_t mapping_found = 0; 2176013af9b6SIntel 2177013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2178013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 2179013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2180013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2181013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 2182013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 2183013af9b6SIntel if (diag != 0) 2184013af9b6SIntel return diag; 2185013af9b6SIntel mapping_found = 1; 2186013af9b6SIntel } 2187013af9b6SIntel } 2188013af9b6SIntel if (mapping_found) 2189013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 2190013af9b6SIntel return 0; 2191013af9b6SIntel } 2192013af9b6SIntel 2193013af9b6SIntel static void 219428caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2195013af9b6SIntel { 2196013af9b6SIntel int diag = 0; 2197013af9b6SIntel 2198013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 2199af75078fSIntel if (diag != 0) { 2200013af9b6SIntel if (diag == -ENOTSUP) { 2201013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 2202013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 2203013af9b6SIntel } 2204013af9b6SIntel else 2205013af9b6SIntel rte_exit(EXIT_FAILURE, 2206013af9b6SIntel "set_tx_queue_stats_mapping_registers " 2207013af9b6SIntel "failed for port id=%d diag=%d\n", 2208af75078fSIntel pi, diag); 2209af75078fSIntel } 2210013af9b6SIntel 2211013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 2212af75078fSIntel if (diag != 0) { 2213013af9b6SIntel if (diag == -ENOTSUP) { 2214013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 2215013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 2216013af9b6SIntel } 2217013af9b6SIntel else 2218013af9b6SIntel rte_exit(EXIT_FAILURE, 2219013af9b6SIntel "set_rx_queue_stats_mapping_registers " 2220013af9b6SIntel "failed for port id=%d diag=%d\n", 2221af75078fSIntel pi, diag); 2222af75078fSIntel } 2223af75078fSIntel } 2224af75078fSIntel 
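/*
 * rxtx_port_config() below seeds each port's Rx/Tx queue configuration from
 * the PMD defaults reported in dev_info and then applies any thresholds set
 * on the command line; parameters left at RTE_PMD_PARAM_UNSET keep the
 * driver defaults.
 */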
2225f2c5125aSPablo de Lara static void 2226f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 2227f2c5125aSPablo de Lara { 2228f2c5125aSPablo de Lara port->rx_conf = port->dev_info.default_rxconf; 2229f2c5125aSPablo de Lara port->tx_conf = port->dev_info.default_txconf; 2230f2c5125aSPablo de Lara 2231f2c5125aSPablo de Lara /* Check if any RX/TX parameters have been passed */ 2232f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2233f2c5125aSPablo de Lara port->rx_conf.rx_thresh.pthresh = rx_pthresh; 2234f2c5125aSPablo de Lara 2235f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2236f2c5125aSPablo de Lara port->rx_conf.rx_thresh.hthresh = rx_hthresh; 2237f2c5125aSPablo de Lara 2238f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2239f2c5125aSPablo de Lara port->rx_conf.rx_thresh.wthresh = rx_wthresh; 2240f2c5125aSPablo de Lara 2241f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2242f2c5125aSPablo de Lara port->rx_conf.rx_free_thresh = rx_free_thresh; 2243f2c5125aSPablo de Lara 2244f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2245f2c5125aSPablo de Lara port->rx_conf.rx_drop_en = rx_drop_en; 2246f2c5125aSPablo de Lara 2247f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2248f2c5125aSPablo de Lara port->tx_conf.tx_thresh.pthresh = tx_pthresh; 2249f2c5125aSPablo de Lara 2250f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2251f2c5125aSPablo de Lara port->tx_conf.tx_thresh.hthresh = tx_hthresh; 2252f2c5125aSPablo de Lara 2253f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2254f2c5125aSPablo de Lara port->tx_conf.tx_thresh.wthresh = tx_wthresh; 2255f2c5125aSPablo de Lara 2256f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2257f2c5125aSPablo de Lara port->tx_conf.tx_rs_thresh = tx_rs_thresh; 2258f2c5125aSPablo de Lara 2259f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2260f2c5125aSPablo de Lara port->tx_conf.tx_free_thresh = tx_free_thresh; 2261f2c5125aSPablo de Lara } 2262f2c5125aSPablo de Lara 2263013af9b6SIntel void 2264013af9b6SIntel init_port_config(void) 2265013af9b6SIntel { 2266013af9b6SIntel portid_t pid; 2267013af9b6SIntel struct rte_port *port; 2268013af9b6SIntel 22697d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2270013af9b6SIntel port = &ports[pid]; 2271013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 22723ce690d3SBruce Richardson if (nb_rxq > 1) { 2273013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2274013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; 2275af75078fSIntel } else { 2276013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2277013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2278af75078fSIntel } 22793ce690d3SBruce Richardson 22805f592039SJingjing Wu if (port->dcb_flag == 0) { 22813ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 22823ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 22833ce690d3SBruce Richardson else 22843ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 22853ce690d3SBruce Richardson } 22863ce690d3SBruce Richardson 2287f2c5125aSPablo de Lara rxtx_port_config(port); 2288013af9b6SIntel 2289013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2290013af9b6SIntel 2291013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 229250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2293e261265eSRadu Nicolau 
rte_pmd_ixgbe_bypass_init(pid); 22947b7e5ba7SIntel #endif 22958ea656f8SGaetan Rivet 22968ea656f8SGaetan Rivet if (lsc_interrupt && 22978ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 22988ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 22998ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2300284c908cSGaetan Rivet if (rmv_interrupt && 2301284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2302284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2303284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 23045b590fbeSJasvinder Singh 23055b590fbeSJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED 23065b590fbeSJasvinder Singh /* Detect softnic port */ 23075b590fbeSJasvinder Singh if (!strcmp(port->dev_info.driver_name, "net_softnic")) { 23085b590fbeSJasvinder Singh port->softnic_enable = 1; 23095b590fbeSJasvinder Singh memset(&port->softport, 0, sizeof(struct softnic_port)); 23105b590fbeSJasvinder Singh 23115b590fbeSJasvinder Singh if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm")) 23125b590fbeSJasvinder Singh port->softport.tm_flag = 1; 23135b590fbeSJasvinder Singh } 23145b590fbeSJasvinder Singh #endif 2315013af9b6SIntel } 2316013af9b6SIntel } 2317013af9b6SIntel 231841b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 231941b05095SBernard Iremonger { 232041b05095SBernard Iremonger struct rte_port *port; 232141b05095SBernard Iremonger 232241b05095SBernard Iremonger port = &ports[slave_pid]; 232341b05095SBernard Iremonger port->slave_flag = 1; 232441b05095SBernard Iremonger } 232541b05095SBernard Iremonger 232641b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 232741b05095SBernard Iremonger { 232841b05095SBernard Iremonger struct rte_port *port; 232941b05095SBernard Iremonger 233041b05095SBernard Iremonger port = &ports[slave_pid]; 233141b05095SBernard Iremonger port->slave_flag = 0; 233241b05095SBernard Iremonger } 233341b05095SBernard Iremonger 23340e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 23350e545d30SBernard Iremonger { 23360e545d30SBernard Iremonger struct rte_port *port; 23370e545d30SBernard Iremonger 23380e545d30SBernard Iremonger port = &ports[slave_pid]; 23390e545d30SBernard Iremonger return port->slave_flag; 23400e545d30SBernard Iremonger } 23410e545d30SBernard Iremonger 2342013af9b6SIntel const uint16_t vlan_tags[] = { 2343013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2344013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2345013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2346013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 2347013af9b6SIntel }; 2348013af9b6SIntel 2349013af9b6SIntel static int 23501a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf, 23511a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 23521a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 23531a572499SJingjing Wu uint8_t pfc_en) 2354013af9b6SIntel { 2355013af9b6SIntel uint8_t i; 2356af75078fSIntel 2357af75078fSIntel /* 2358013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2359013af9b6SIntel * given above, and the number of traffic classes available for use. 
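 * With DCB_VT_ENABLED the VMDq pools are mapped one-to-one onto the
 * vlan_tags[] entries; in plain DCB mode only the per-TC Rx/Tx fields and
 * the RSS hash functions are filled in.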
2360af75078fSIntel */ 23611a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 23621a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 23631a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 23641a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 23651a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2366013af9b6SIntel 2367547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 23681a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 23691a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 23701a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 23711a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 23721a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 23731a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2374013af9b6SIntel 23751a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 23761a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 23771a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 23781a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 23791a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2380af75078fSIntel } 2381013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2382f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2383f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2384013af9b6SIntel } 2385013af9b6SIntel 2386013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 238732e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 238832e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 23891a572499SJingjing Wu } else { 23901a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 23911a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 23921a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 23931a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2394013af9b6SIntel 23951a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 23961a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 23971a572499SJingjing Wu 2398bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2399bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2400bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2401013af9b6SIntel } 24021a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 24031a572499SJingjing Wu eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf; 240432e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 24051a572499SJingjing Wu } 24061a572499SJingjing Wu 24071a572499SJingjing Wu if (pfc_en) 24081a572499SJingjing Wu eth_conf->dcb_capability_en = 24091a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2410013af9b6SIntel else 2411013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2412013af9b6SIntel 2413013af9b6SIntel return 0; 2414013af9b6SIntel } 2415013af9b6SIntel 2416013af9b6SIntel int 24171a572499SJingjing Wu init_port_dcb_config(portid_t pid, 24181a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 24191a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 24201a572499SJingjing Wu uint8_t pfc_en) 2421013af9b6SIntel { 2422013af9b6SIntel struct rte_eth_conf port_conf; 2423013af9b6SIntel struct rte_port *rte_port; 2424013af9b6SIntel int retval; 2425013af9b6SIntel uint16_t i; 2426013af9b6SIntel 24272a977b89SWenzhuo Lu rte_port = &ports[pid]; 2428013af9b6SIntel 2429013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2430013af9b6SIntel /* Enter DCB configuration status */ 2431013af9b6SIntel dcb_config = 1; 
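	/*
	 * The global dcb_config flag makes the next start_port() run in DCB
	 * test mode; the device is first configured with zero Rx/Tx queues
	 * below so that the actual queue setup is deferred to start_port().
	 */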
2432013af9b6SIntel 2433d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 2434d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 2435d5354e89SYanglong Wu 2436013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 24371a572499SJingjing Wu retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); 2438013af9b6SIntel if (retval < 0) 2439013af9b6SIntel return retval; 24400074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2441013af9b6SIntel 24422a977b89SWenzhuo Lu /** 24432a977b89SWenzhuo Lu * Write the configuration into the device. 24442a977b89SWenzhuo Lu * Set the numbers of RX & TX queues to 0, so 24452a977b89SWenzhuo Lu * the RX & TX queues will not be setup. 24462a977b89SWenzhuo Lu */ 2447c947ef89SStephen Hemminger rte_eth_dev_configure(pid, 0, 0, &port_conf); 24482a977b89SWenzhuo Lu 24492a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 24502a977b89SWenzhuo Lu 24512a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 24522a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 24532a977b89SWenzhuo Lu */ 24542a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 24552a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 24562a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 24572a977b89SWenzhuo Lu " for port %d.", pid); 24582a977b89SWenzhuo Lu return -1; 24592a977b89SWenzhuo Lu } 24602a977b89SWenzhuo Lu 24612a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 24622a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 24632a977b89SWenzhuo Lu */ 24642a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 246586ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 246686ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 246786ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 246886ef65eeSBernard Iremonger } else { 24692a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 24702a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 247186ef65eeSBernard Iremonger } 24722a977b89SWenzhuo Lu } else { 24732a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 24742a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 24752a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 24762a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 24772a977b89SWenzhuo Lu } else { 24782a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 24792a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 24802a977b89SWenzhuo Lu 24812a977b89SWenzhuo Lu } 24822a977b89SWenzhuo Lu } 24832a977b89SWenzhuo Lu rx_free_thresh = 64; 24842a977b89SWenzhuo Lu 2485013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2486013af9b6SIntel 2487f2c5125aSPablo de Lara rxtx_port_config(rte_port); 2488013af9b6SIntel /* VLAN filter */ 24890074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 24901a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 2491013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 2492013af9b6SIntel 2493013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2494013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 2495013af9b6SIntel 24967741e4cfSIntel rte_port->dcb_flag = 1; 24977741e4cfSIntel 2498013af9b6SIntel return 0; 2499af75078fSIntel } 2500af75078fSIntel 2501ffc468ffSTetsuya Mukawa static void 2502ffc468ffSTetsuya Mukawa init_port(void) 
2503ffc468ffSTetsuya Mukawa { 2504ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. */ 2505ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 2506ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2507ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 2508ffc468ffSTetsuya Mukawa if (ports == NULL) { 2509ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2510ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2511ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2512ffc468ffSTetsuya Mukawa } 2513ffc468ffSTetsuya Mukawa } 2514ffc468ffSTetsuya Mukawa 2515d3a274ceSZhihong Wang static void 2516d3a274ceSZhihong Wang force_quit(void) 2517d3a274ceSZhihong Wang { 2518d3a274ceSZhihong Wang pmd_test_exit(); 2519d3a274ceSZhihong Wang prompt_exit(); 2520d3a274ceSZhihong Wang } 2521d3a274ceSZhihong Wang 2522d3a274ceSZhihong Wang static void 2523cfea1f30SPablo de Lara print_stats(void) 2524cfea1f30SPablo de Lara { 2525cfea1f30SPablo de Lara uint8_t i; 2526cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 2527cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 2528cfea1f30SPablo de Lara 2529cfea1f30SPablo de Lara /* Clear screen and move to top left */ 2530cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 2531cfea1f30SPablo de Lara 2532cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 2533cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2534cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 2535cfea1f30SPablo de Lara } 2536cfea1f30SPablo de Lara 2537cfea1f30SPablo de Lara static void 2538d3a274ceSZhihong Wang signal_handler(int signum) 2539d3a274ceSZhihong Wang { 2540d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 2541d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 2542d3a274ceSZhihong Wang signum); 2543102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 2544102b7329SReshma Pattan /* uninitialize packet capture framework */ 2545102b7329SReshma Pattan rte_pdump_uninit(); 2546102b7329SReshma Pattan #endif 254762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 254862d3216dSReshma Pattan rte_latencystats_uninit(); 254962d3216dSReshma Pattan #endif 2550d3a274ceSZhihong Wang force_quit(); 2551d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
2537cfea1f30SPablo de Lara static void
2538d3a274ceSZhihong Wang signal_handler(int signum)
2539d3a274ceSZhihong Wang {
2540d3a274ceSZhihong Wang     if (signum == SIGINT || signum == SIGTERM) {
2541d3a274ceSZhihong Wang         printf("\nSignal %d received, preparing to exit...\n",
2542d3a274ceSZhihong Wang             signum);
2543102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2544102b7329SReshma Pattan         /* uninitialize packet capture framework */
2545102b7329SReshma Pattan         rte_pdump_uninit();
2546102b7329SReshma Pattan #endif
254762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
254862d3216dSReshma Pattan         rte_latencystats_uninit();
254962d3216dSReshma Pattan #endif
2550d3a274ceSZhihong Wang         force_quit();
2551d9a191a0SPhil Yang         /* Set flag to indicate forced termination. */
2552d9a191a0SPhil Yang         f_quit = 1;
2553d3a274ceSZhihong Wang         /* exit with the expected status */
2554d3a274ceSZhihong Wang         signal(signum, SIG_DFL);
2555d3a274ceSZhihong Wang         kill(getpid(), signum);
2556d3a274ceSZhihong Wang     }
2557d3a274ceSZhihong Wang }
2558d3a274ceSZhihong Wang 
2559af75078fSIntel int
2560af75078fSIntel main(int argc, char** argv)
2561af75078fSIntel {
2562af75078fSIntel     int diag;
2563f8244c63SZhiyong Yang     portid_t port_id;
2564fb73e096SJeff Guo     int ret;
2565af75078fSIntel 
2566d3a274ceSZhihong Wang     signal(SIGINT, signal_handler);
2567d3a274ceSZhihong Wang     signal(SIGTERM, signal_handler);
2568d3a274ceSZhihong Wang 
2569af75078fSIntel     diag = rte_eal_init(argc, argv);
2570af75078fSIntel     if (diag < 0)
2571af75078fSIntel         rte_panic("Cannot init EAL\n");
2572af75078fSIntel 
2573285fd101SOlivier Matz     testpmd_logtype = rte_log_register("testpmd");
2574285fd101SOlivier Matz     if (testpmd_logtype < 0)
2575285fd101SOlivier Matz         rte_panic("Cannot register log type");
2576285fd101SOlivier Matz     rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2577285fd101SOlivier Matz 
25781c036b16SEelco Chaudron     if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2579285fd101SOlivier Matz         TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
25801c036b16SEelco Chaudron             strerror(errno));
25811c036b16SEelco Chaudron     }
25821c036b16SEelco Chaudron 
2583102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2584102b7329SReshma Pattan     /* initialize packet capture framework */
2585102b7329SReshma Pattan     rte_pdump_init(NULL);
2586102b7329SReshma Pattan #endif
2587102b7329SReshma Pattan 
2588af75078fSIntel     nb_ports = (portid_t) rte_eth_dev_count();
2589af75078fSIntel     if (nb_ports == 0)
2590285fd101SOlivier Matz         TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2591af75078fSIntel 
2592ffc468ffSTetsuya Mukawa     /* allocate port structures and initialize them */
2593ffc468ffSTetsuya Mukawa     init_port();
2594ffc468ffSTetsuya Mukawa 
2595af75078fSIntel     set_def_fwd_config();
2596af75078fSIntel     if (nb_lcores == 0)
2597af75078fSIntel         rte_panic("Empty set of forwarding logical cores - check the "
2598af75078fSIntel             "core mask supplied in the command parameters\n");
2599af75078fSIntel 
260065eb1e54SPablo de Lara     /* Bitrate/latency stats disabled by default */
260130bcc68cSPablo de Lara #ifdef RTE_LIBRTE_BITRATE
2602e25e6c70SRemy Horton     bitrate_enabled = 0;
260330bcc68cSPablo de Lara #endif
260465eb1e54SPablo de Lara #ifdef RTE_LIBRTE_LATENCY_STATS
260565eb1e54SPablo de Lara     latencystats_enabled = 0;
260665eb1e54SPablo de Lara #endif
2607e25e6c70SRemy Horton 
2608af75078fSIntel     argc -= diag;
2609af75078fSIntel     argv += diag;
2610af75078fSIntel     if (argc > 1)
2611af75078fSIntel         launch_args_parse(argc, argv);
2612af75078fSIntel 
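/*
 * Example invocation (illustrative only; the exact option set depends on
 * the build and the attached NICs). rte_eal_init() consumes the EAL options
 * before the "--" separator and returns how many arguments it used, so only
 * the remaining application-specific options reach launch_args_parse():
 *
 *   ./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2 --stats-period=1
 */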
261399cabef0SPablo de Lara     if (tx_first && interactive)
261499cabef0SPablo de Lara         rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
261599cabef0SPablo de Lara             "interactive mode.\n");
26168820cba4SDavid Hunt 
26178820cba4SDavid Hunt     if (tx_first && lsc_interrupt) {
26188820cba4SDavid Hunt         printf("Warning: lsc_interrupt needs to be off when "
26198820cba4SDavid Hunt             "using tx_first. Disabling.\n");
26208820cba4SDavid Hunt         lsc_interrupt = 0;
26218820cba4SDavid Hunt     }
26228820cba4SDavid Hunt 
26235a8fb55cSReshma Pattan     if (!nb_rxq && !nb_txq)
26245a8fb55cSReshma Pattan         printf("Warning: Either rx or tx queues should be non-zero\n");
26255a8fb55cSReshma Pattan 
26265a8fb55cSReshma Pattan     if (nb_rxq > 1 && nb_rxq > nb_txq)
2627af75078fSIntel         printf("Warning: nb_rxq=%d enables RSS configuration, "
2628af75078fSIntel                "but nb_txq=%d will prevent it from being fully tested.\n",
2629af75078fSIntel                nb_rxq, nb_txq);
2630af75078fSIntel 
2631af75078fSIntel     init_config();
2632fb73e096SJeff Guo 
2633fb73e096SJeff Guo     if (hot_plug) {
2634fb73e096SJeff Guo         /* enable hot plug monitoring */
2635fb73e096SJeff Guo         ret = rte_dev_event_monitor_start();
2636fb73e096SJeff Guo         if (ret) {
2637fb73e096SJeff Guo             rte_errno = EINVAL;
2638fb73e096SJeff Guo             return -1;
2639fb73e096SJeff Guo         }
2640fb73e096SJeff Guo         eth_dev_event_callback_register();
2641fb73e096SJeff Guo 
2642fb73e096SJeff Guo     }
2643fb73e096SJeff Guo 
2644148f963fSBruce Richardson     if (start_port(RTE_PORT_ALL) != 0)
2645148f963fSBruce Richardson         rte_exit(EXIT_FAILURE, "Start ports failed\n");
2646af75078fSIntel 
2647ce8d5614SIntel     /* set all ports to promiscuous mode by default */
26487d89b261SGaetan Rivet     RTE_ETH_FOREACH_DEV(port_id)
2649ce8d5614SIntel         rte_eth_promiscuous_enable(port_id);
2650af75078fSIntel 
26517e4441c8SRemy Horton     /* Init metrics library */
26527e4441c8SRemy Horton     rte_metrics_init(rte_socket_id());
26537e4441c8SRemy Horton 
265462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
265562d3216dSReshma Pattan     if (latencystats_enabled != 0) {
265662d3216dSReshma Pattan         int ret = rte_latencystats_init(1, NULL);
265762d3216dSReshma Pattan         if (ret)
265862d3216dSReshma Pattan             printf("Warning: latencystats init()"
265962d3216dSReshma Pattan                 " returned error %d\n", ret);
266062d3216dSReshma Pattan         printf("Latencystats running on lcore %d\n",
266162d3216dSReshma Pattan             latencystats_lcore_id);
266262d3216dSReshma Pattan     }
266362d3216dSReshma Pattan #endif
266462d3216dSReshma Pattan 
26657e4441c8SRemy Horton     /* Setup bitrate stats */
26667e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
2667e25e6c70SRemy Horton     if (bitrate_enabled != 0) {
26687e4441c8SRemy Horton         bitrate_data = rte_stats_bitrate_create();
26697e4441c8SRemy Horton         if (bitrate_data == NULL)
2670e25e6c70SRemy Horton             rte_exit(EXIT_FAILURE,
2671e25e6c70SRemy Horton                 "Could not allocate bitrate data.\n");
26727e4441c8SRemy Horton         rte_stats_bitrate_reg(bitrate_data);
2673e25e6c70SRemy Horton     }
26747e4441c8SRemy Horton #endif
26757e4441c8SRemy Horton 
26760d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
267781ef862bSAllain Legacy     if (strlen(cmdline_filename) != 0)
267881ef862bSAllain Legacy         cmdline_read_from_file(cmdline_filename);
267981ef862bSAllain Legacy 
2680ca7feb22SCyril Chemparathy     if (interactive == 1) {
2681ca7feb22SCyril Chemparathy         if (auto_start) {
2682ca7feb22SCyril Chemparathy             printf("Start automatic packet forwarding\n");
2683ca7feb22SCyril Chemparathy             start_packet_forwarding(0);
2684ca7feb22SCyril Chemparathy         }
2685af75078fSIntel         prompt();
26860de738cfSJiayu Hu         pmd_test_exit();
2687ca7feb22SCyril Chemparathy     } else
26880d56cb81SThomas Monjalon #endif
26890d56cb81SThomas Monjalon     {
2690af75078fSIntel         char c;
2691af75078fSIntel         int rc;
2692af75078fSIntel 
2693d9a191a0SPhil Yang         f_quit = 0;
2694d9a191a0SPhil Yang 
2695af75078fSIntel         printf("No commandline core given, start packet forwarding\n");
269699cabef0SPablo de Lara         start_packet_forwarding(tx_first);
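/*
 * Non-interactive mode: when --stats-period is non-zero, the loop below
 * prints the per-port statistics every stats_period seconds (converted to
 * timer cycles) until SIGINT/SIGTERM sets f_quit; otherwise testpmd simply
 * waits for the user to press enter before cleaning up.
 */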
2697cfea1f30SPablo de Lara         if (stats_period != 0) {
2698cfea1f30SPablo de Lara             uint64_t prev_time = 0, cur_time, diff_time = 0;
2699cfea1f30SPablo de Lara             uint64_t timer_period;
2700cfea1f30SPablo de Lara 
2701cfea1f30SPablo de Lara             /* Convert to number of cycles */
2702cfea1f30SPablo de Lara             timer_period = stats_period * rte_get_timer_hz();
2703cfea1f30SPablo de Lara 
2704d9a191a0SPhil Yang             while (f_quit == 0) {
2705cfea1f30SPablo de Lara                 cur_time = rte_get_timer_cycles();
2706cfea1f30SPablo de Lara                 diff_time += cur_time - prev_time;
2707cfea1f30SPablo de Lara 
2708cfea1f30SPablo de Lara                 if (diff_time >= timer_period) {
2709cfea1f30SPablo de Lara                     print_stats();
2710cfea1f30SPablo de Lara                     /* Reset the timer */
2711cfea1f30SPablo de Lara                     diff_time = 0;
2712cfea1f30SPablo de Lara                 }
2713cfea1f30SPablo de Lara                 /* Sleep to avoid unnecessary checks */
2714cfea1f30SPablo de Lara                 prev_time = cur_time;
2715cfea1f30SPablo de Lara                 sleep(1);
2716cfea1f30SPablo de Lara             }
2717cfea1f30SPablo de Lara         }
2718cfea1f30SPablo de Lara 
2719af75078fSIntel         printf("Press enter to exit\n");
2720af75078fSIntel         rc = read(0, &c, 1);
2721d3a274ceSZhihong Wang         pmd_test_exit();
2722af75078fSIntel         if (rc < 0)
2723af75078fSIntel             return 1;
2724af75078fSIntel     }
2725af75078fSIntel 
2726af75078fSIntel     return 0;
2727af75078fSIntel }
2728