1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2174a1631SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3af75078fSIntel */ 4af75078fSIntel 5af75078fSIntel #include <stdarg.h> 6af75078fSIntel #include <stdio.h> 7af75078fSIntel #include <stdlib.h> 8af75078fSIntel #include <signal.h> 9af75078fSIntel #include <string.h> 10af75078fSIntel #include <time.h> 11af75078fSIntel #include <fcntl.h> 121c036b16SEelco Chaudron #include <sys/mman.h> 13af75078fSIntel #include <sys/types.h> 14af75078fSIntel #include <errno.h> 15fb73e096SJeff Guo #include <stdbool.h> 16af75078fSIntel 17af75078fSIntel #include <sys/queue.h> 18af75078fSIntel #include <sys/stat.h> 19af75078fSIntel 20af75078fSIntel #include <stdint.h> 21af75078fSIntel #include <unistd.h> 22af75078fSIntel #include <inttypes.h> 23af75078fSIntel 24af75078fSIntel #include <rte_common.h> 25d1eb542eSOlivier Matz #include <rte_errno.h> 26af75078fSIntel #include <rte_byteorder.h> 27af75078fSIntel #include <rte_log.h> 28af75078fSIntel #include <rte_debug.h> 29af75078fSIntel #include <rte_cycles.h> 30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h> 31af75078fSIntel #include <rte_memory.h> 32af75078fSIntel #include <rte_memcpy.h> 33af75078fSIntel #include <rte_launch.h> 34af75078fSIntel #include <rte_eal.h> 35284c908cSGaetan Rivet #include <rte_alarm.h> 36af75078fSIntel #include <rte_per_lcore.h> 37af75078fSIntel #include <rte_lcore.h> 38af75078fSIntel #include <rte_atomic.h> 39af75078fSIntel #include <rte_branch_prediction.h> 40af75078fSIntel #include <rte_mempool.h> 41af75078fSIntel #include <rte_malloc.h> 42af75078fSIntel #include <rte_mbuf.h> 430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h> 44af75078fSIntel #include <rte_interrupts.h> 45af75078fSIntel #include <rte_pci.h> 46af75078fSIntel #include <rte_ether.h> 47af75078fSIntel #include <rte_ethdev.h> 48edab33b1STetsuya Mukawa #include <rte_dev.h> 49af75078fSIntel #include <rte_string_fns.h> 50e261265eSRadu Nicolau #ifdef 
RTE_LIBRTE_IXGBE_PMD 51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h> 52e261265eSRadu Nicolau #endif 53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 54102b7329SReshma Pattan #include <rte_pdump.h> 55102b7329SReshma Pattan #endif 56938a184aSAdrien Mazarguil #include <rte_flow.h> 577e4441c8SRemy Horton #include <rte_metrics.h> 587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 597e4441c8SRemy Horton #include <rte_bitrate.h> 607e4441c8SRemy Horton #endif 6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 6262d3216dSReshma Pattan #include <rte_latencystats.h> 6362d3216dSReshma Pattan #endif 64af75078fSIntel 65af75078fSIntel #include "testpmd.h" 66af75078fSIntel 67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB 68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ 69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000) 70c7f5dba7SAnatoly Burakov #else 71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB 72c7f5dba7SAnatoly Burakov #endif 73c7f5dba7SAnatoly Burakov 74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT 75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */ 76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26) 77c7f5dba7SAnatoly Burakov #else 78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT 79c7f5dba7SAnatoly Burakov #endif 80c7f5dba7SAnatoly Burakov 81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem" 82c7f5dba7SAnatoly Burakov 83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */ 85af75078fSIntel 86af75078fSIntel /* use master core for command line ? */ 87af75078fSIntel uint8_t interactive = 0; 88ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 8999cabef0SPablo de Lara uint8_t tx_first; 9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0}; 91af75078fSIntel 92af75078fSIntel /* 93af75078fSIntel * NUMA support configuration. 
94af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 95af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 96af75078fSIntel * probed ports among the CPU sockets 0 and 1. 97af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 98af75078fSIntel */ 99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */ 100af75078fSIntel 101af75078fSIntel /* 102b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 103b6ea6408SIntel * not configured. 104b6ea6408SIntel */ 105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 106b6ea6408SIntel 107b6ea6408SIntel /* 108c7f5dba7SAnatoly Burakov * Select mempool allocation type: 109c7f5dba7SAnatoly Burakov * - native: use regular DPDK memory 110c7f5dba7SAnatoly Burakov * - anon: use regular DPDK memory to create mempool, but populate using 111c7f5dba7SAnatoly Burakov * anonymous memory (may not be IOVA-contiguous) 112c7f5dba7SAnatoly Burakov * - xmem: use externally allocated hugepage memory 113148f963fSBruce Richardson */ 114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE; 115148f963fSBruce Richardson 116148f963fSBruce Richardson /* 11763531389SGeorgios Katsikas * Store specified sockets on which memory pool to be used by ports 11863531389SGeorgios Katsikas * is allocated. 11963531389SGeorgios Katsikas */ 12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS]; 12163531389SGeorgios Katsikas 12263531389SGeorgios Katsikas /* 12363531389SGeorgios Katsikas * Store specified sockets on which RX ring to be used by ports 12463531389SGeorgios Katsikas * is allocated. 12563531389SGeorgios Katsikas */ 12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 12763531389SGeorgios Katsikas 12863531389SGeorgios Katsikas /* 12963531389SGeorgios Katsikas * Store specified sockets on which TX ring to be used by ports 13063531389SGeorgios Katsikas * is allocated. 
13163531389SGeorgios Katsikas */ 13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS]; 13363531389SGeorgios Katsikas 13463531389SGeorgios Katsikas /* 135af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 136af75078fSIntel * forwarded. 137547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator 138af75078fSIntel * ports. 139af75078fSIntel */ 140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 141af75078fSIntel portid_t nb_peer_eth_addrs = 0; 142af75078fSIntel 143af75078fSIntel /* 144af75078fSIntel * Probed Target Environment. 145af75078fSIntel */ 146af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 147af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 149af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 150af75078fSIntel 1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ 1524918a357SXiaoyun Li 153af75078fSIntel /* 154af75078fSIntel * Test Forwarding Configuration. 155af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 156af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 157af75078fSIntel */ 158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 160af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 161af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 162af75078fSIntel 163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 165af75078fSIntel 166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. 
*/ 167af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 168af75078fSIntel 169af75078fSIntel /* 170af75078fSIntel * Forwarding engines. 171af75078fSIntel */ 172af75078fSIntel struct fwd_engine * fwd_engines[] = { 173af75078fSIntel &io_fwd_engine, 174af75078fSIntel &mac_fwd_engine, 175d47388f1SCyril Chemparathy &mac_swap_engine, 176e9e23a61SCyril Chemparathy &flow_gen_engine, 177af75078fSIntel &rx_only_engine, 178af75078fSIntel &tx_only_engine, 179af75078fSIntel &csum_fwd_engine, 180168dfa61SIvan Boule &icmp_echo_engine, 1813c156061SJens Freimann &noisy_vnf_engine, 1820ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC 1830ad778b3SJasvinder Singh &softnic_fwd_engine, 1845b590fbeSJasvinder Singh #endif 185af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 186af75078fSIntel &ieee1588_fwd_engine, 187af75078fSIntel #endif 188af75078fSIntel NULL, 189af75078fSIntel }; 190af75078fSIntel 191af75078fSIntel struct fwd_config cur_fwd_config; 192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 193bf56fce1SZhihong Wang uint32_t retry_enabled; 194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 196af75078fSIntel 197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 198c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 199c8798818SIntel * specified on command-line. */ 200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 201d9a191a0SPhil Yang 202d9a191a0SPhil Yang /* 203d9a191a0SPhil Yang * In container, it cannot terminate the process which running with 'stats-period' 204d9a191a0SPhil Yang * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 
205d9a191a0SPhil Yang */ 206d9a191a0SPhil Yang uint8_t f_quit; 207d9a191a0SPhil Yang 208af75078fSIntel /* 209af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 210af75078fSIntel */ 211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 213af75078fSIntel TXONLY_DEF_PACKET_LEN, 214af75078fSIntel }; 215af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 216af75078fSIntel 21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 21979bec05bSKonstantin Ananyev 220af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 221e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 222af75078fSIntel 223900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 224900550deSIntel uint8_t dcb_config = 0; 225900550deSIntel 226900550deSIntel /* Whether the dcb is in testing status */ 227900550deSIntel uint8_t dcb_test = 0; 228900550deSIntel 229af75078fSIntel /* 230af75078fSIntel * Configurable number of RX/TX queues. 231af75078fSIntel */ 232af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 233af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 234af75078fSIntel 235af75078fSIntel /* 236af75078fSIntel * Configurable number of RX/TX ring descriptors. 2378599ed31SRemy Horton * Defaults are supplied by drivers via ethdev. 238af75078fSIntel */ 2398599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0 2408599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0 241af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 242af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. 
*/ 243af75078fSIntel 244f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1 245af75078fSIntel /* 246af75078fSIntel * Configurable values of RX and TX ring threshold registers. 247af75078fSIntel */ 248af75078fSIntel 249f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 250f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 251f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 252af75078fSIntel 253f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 254f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 255f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 256af75078fSIntel 257af75078fSIntel /* 258af75078fSIntel * Configurable value of RX free threshold. 259af75078fSIntel */ 260f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 261af75078fSIntel 262af75078fSIntel /* 263ce8d5614SIntel * Configurable value of RX drop enable. 264ce8d5614SIntel */ 265f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 266ce8d5614SIntel 267ce8d5614SIntel /* 268af75078fSIntel * Configurable value of TX free threshold. 269af75078fSIntel */ 270f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 271af75078fSIntel 272af75078fSIntel /* 273af75078fSIntel * Configurable value of TX RS bit threshold. 274af75078fSIntel */ 275f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 276af75078fSIntel 277af75078fSIntel /* 2783c156061SJens Freimann * Configurable value of buffered packets before sending. 2793c156061SJens Freimann */ 2803c156061SJens Freimann uint16_t noisy_tx_sw_bufsz; 2813c156061SJens Freimann 2823c156061SJens Freimann /* 2833c156061SJens Freimann * Configurable value of packet buffer timeout. 
2843c156061SJens Freimann */ 2853c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time; 2863c156061SJens Freimann 2873c156061SJens Freimann /* 2883c156061SJens Freimann * Configurable value for size of VNF internal memory area 2893c156061SJens Freimann * used for simulating noisy neighbour behaviour 2903c156061SJens Freimann */ 2913c156061SJens Freimann uint64_t noisy_lkup_mem_sz; 2923c156061SJens Freimann 2933c156061SJens Freimann /* 2943c156061SJens Freimann * Configurable value of number of random writes done in 2953c156061SJens Freimann * VNF simulation memory area. 2963c156061SJens Freimann */ 2973c156061SJens Freimann uint64_t noisy_lkup_num_writes; 2983c156061SJens Freimann 2993c156061SJens Freimann /* 3003c156061SJens Freimann * Configurable value of number of random reads done in 3013c156061SJens Freimann * VNF simulation memory area. 3023c156061SJens Freimann */ 3033c156061SJens Freimann uint64_t noisy_lkup_num_reads; 3043c156061SJens Freimann 3053c156061SJens Freimann /* 3063c156061SJens Freimann * Configurable value of number of random reads/writes done in 3073c156061SJens Freimann * VNF simulation memory area. 3083c156061SJens Freimann */ 3093c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes; 3103c156061SJens Freimann 3113c156061SJens Freimann /* 312af75078fSIntel * Receive Side Scaling (RSS) configuration. 313af75078fSIntel */ 3148a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 315af75078fSIntel 316af75078fSIntel /* 317af75078fSIntel * Port topology configuration 318af75078fSIntel */ 319af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 320af75078fSIntel 3217741e4cfSIntel /* 3227741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 3237741e4cfSIntel */ 3247741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 3257741e4cfSIntel 326af75078fSIntel /* 3277ee3e944SVasily Philipov * Flow API isolated mode. 
3287ee3e944SVasily Philipov */ 3297ee3e944SVasily Philipov uint8_t flow_isolate_all; 3307ee3e944SVasily Philipov 3317ee3e944SVasily Philipov /* 332bc202406SDavid Marchand * Avoids to check link status when starting/stopping a port. 333bc202406SDavid Marchand */ 334bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 335bc202406SDavid Marchand 336bc202406SDavid Marchand /* 3378ea656f8SGaetan Rivet * Enable link status change notification 3388ea656f8SGaetan Rivet */ 3398ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */ 3408ea656f8SGaetan Rivet 3418ea656f8SGaetan Rivet /* 342284c908cSGaetan Rivet * Enable device removal notification. 343284c908cSGaetan Rivet */ 344284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */ 345284c908cSGaetan Rivet 346fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */ 347fb73e096SJeff Guo 348284c908cSGaetan Rivet /* 3493af72783SGaetan Rivet * Display or mask ether events 3503af72783SGaetan Rivet * Default to all events except VF_MBOX 3513af72783SGaetan Rivet */ 3523af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 3533af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 3543af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 3553af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 356badb87c1SAnoob Joseph (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | 3573af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 3583af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 359e505d84cSAnatoly Burakov /* 360e505d84cSAnatoly Burakov * Decide if all memory are locked for performance. 361e505d84cSAnatoly Burakov */ 362e505d84cSAnatoly Burakov int do_mlockall = 0; 3633af72783SGaetan Rivet 3643af72783SGaetan Rivet /* 3657b7e5ba7SIntel * NIC bypass mode configuration options. 
3667b7e5ba7SIntel */ 3677b7e5ba7SIntel 36850c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 3697b7e5ba7SIntel /* The NIC bypass watchdog timeout. */ 370e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 3717b7e5ba7SIntel #endif 3727b7e5ba7SIntel 373e261265eSRadu Nicolau 37462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 37562d3216dSReshma Pattan 37662d3216dSReshma Pattan /* 37762d3216dSReshma Pattan * Set when latency stats is enabled in the commandline 37862d3216dSReshma Pattan */ 37962d3216dSReshma Pattan uint8_t latencystats_enabled; 38062d3216dSReshma Pattan 38162d3216dSReshma Pattan /* 38262d3216dSReshma Pattan * Lcore ID to serive latency statistics. 38362d3216dSReshma Pattan */ 38462d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1; 38562d3216dSReshma Pattan 38662d3216dSReshma Pattan #endif 38762d3216dSReshma Pattan 3887b7e5ba7SIntel /* 389af75078fSIntel * Ethernet device configuration. 390af75078fSIntel */ 391af75078fSIntel struct rte_eth_rxmode rx_mode = { 392af75078fSIntel .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. 
*/ 393af75078fSIntel }; 394af75078fSIntel 39507e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = { 39607e5f7bdSShahaf Shuler .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 39707e5f7bdSShahaf Shuler }; 398fd8c20aaSShahaf Shuler 399af75078fSIntel struct rte_fdir_conf fdir_conf = { 400af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 401af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 402af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 403d9d5e6f2SJingjing Wu .mask = { 40426f579aaSWei Zhao .vlan_tci_mask = 0xFFEF, 405d9d5e6f2SJingjing Wu .ipv4_mask = { 406d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF, 407d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF, 408d9d5e6f2SJingjing Wu }, 409d9d5e6f2SJingjing Wu .ipv6_mask = { 410d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 411d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 412d9d5e6f2SJingjing Wu }, 413d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF, 414d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF, 41547b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF, 41647b3ac6bSWenzhuo Lu .tunnel_type_mask = 1, 41747b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF, 418d9d5e6f2SJingjing Wu }, 419af75078fSIntel .drop_queue = 127, 420af75078fSIntel }; 421af75078fSIntel 4222950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ 423af75078fSIntel 424ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 425ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 426ed30d9b6SIntel 427ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 428ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 429ed30d9b6SIntel 430ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 431ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 432ed30d9b6SIntel 433a4fd5eeeSElza Mathew /* 434a4fd5eeeSElza Mathew * Display zero values by default for xstats 435a4fd5eeeSElza Mathew */ 436a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 437a4fd5eeeSElza Mathew 438c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 439c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 4407acf894dSStephen Hurd 441e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE 4427e4441c8SRemy Horton /* Bitrate statistics */ 4437e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 444e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 445e25e6c70SRemy Horton uint8_t bitrate_enabled; 446e25e6c70SRemy Horton #endif 4477e4441c8SRemy Horton 448b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 449b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 450b40f8d78SJiayu Hu 4511960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = { 4521960be7dSNelio Laranjeiro .select_ipv4 = 1, 4531960be7dSNelio Laranjeiro .select_vlan = 0, 4541960be7dSNelio Laranjeiro .vni = "\x00\x00\x00", 4551960be7dSNelio Laranjeiro .udp_src = 0, 4561960be7dSNelio Laranjeiro .udp_dst = RTE_BE16(4789), 4571960be7dSNelio Laranjeiro .ipv4_src = IPv4(127, 0, 0, 1), 4581960be7dSNelio Laranjeiro .ipv4_dst = IPv4(255, 255, 255, 255), 4591960be7dSNelio Laranjeiro .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 4601960be7dSNelio Laranjeiro 
"\x00\x00\x00\x00\x00\x00\x00\x01", 4611960be7dSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 4621960be7dSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 4631960be7dSNelio Laranjeiro .vlan_tci = 0, 4641960be7dSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 4651960be7dSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 4661960be7dSNelio Laranjeiro }; 4671960be7dSNelio Laranjeiro 468dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = { 469dcd962fcSNelio Laranjeiro .select_ipv4 = 1, 470dcd962fcSNelio Laranjeiro .select_vlan = 0, 471dcd962fcSNelio Laranjeiro .tni = "\x00\x00\x00", 472dcd962fcSNelio Laranjeiro .ipv4_src = IPv4(127, 0, 0, 1), 473dcd962fcSNelio Laranjeiro .ipv4_dst = IPv4(255, 255, 255, 255), 474dcd962fcSNelio Laranjeiro .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 475dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x00\x01", 476dcd962fcSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 477dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 478dcd962fcSNelio Laranjeiro .vlan_tci = 0, 479dcd962fcSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 480dcd962fcSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 481dcd962fcSNelio Laranjeiro }; 482dcd962fcSNelio Laranjeiro 483ed30d9b6SIntel /* Forward function declarations */ 484*c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi); 48528caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 48628caa76aSZhiyong Yang struct rte_port *port); 487edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 488f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 48976ad4a2dSGaetan Rivet enum rte_eth_event_type type, 490d6af1a13SBernard Iremonger void *param, void *ret_param); 49189ecd110SJeff Guo static void eth_dev_event_callback(const char *device_name, 492fb73e096SJeff Guo enum rte_dev_event_type type, 493fb73e096SJeff Guo void *param); 
494ce8d5614SIntel 495ce8d5614SIntel /* 496ce8d5614SIntel * Check if all the ports are started. 497ce8d5614SIntel * If yes, return positive value. If not, return zero. 498ce8d5614SIntel */ 499ce8d5614SIntel static int all_ports_started(void); 500ed30d9b6SIntel 50152f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 50252f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; 50352f38a20SJiayu Hu 504af75078fSIntel /* 50598a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 506c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 507c9cafcc8SShahaf Shuler */ 508c9cafcc8SShahaf Shuler int 509c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 510c9cafcc8SShahaf Shuler { 511c9cafcc8SShahaf Shuler unsigned int i; 512c9cafcc8SShahaf Shuler 513c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 514c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 515c9cafcc8SShahaf Shuler return 0; 516c9cafcc8SShahaf Shuler } 517c9cafcc8SShahaf Shuler return 1; 518c9cafcc8SShahaf Shuler } 519c9cafcc8SShahaf Shuler 520c9cafcc8SShahaf Shuler /* 521af75078fSIntel * Setup default configuration. 
522af75078fSIntel */ 523af75078fSIntel static void 524af75078fSIntel set_default_fwd_lcores_config(void) 525af75078fSIntel { 526af75078fSIntel unsigned int i; 527af75078fSIntel unsigned int nb_lc; 5287acf894dSStephen Hurd unsigned int sock_num; 529af75078fSIntel 530af75078fSIntel nb_lc = 0; 531af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 532dbfb8ec7SPhil Yang if (!rte_lcore_is_enabled(i)) 533dbfb8ec7SPhil Yang continue; 534c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 535c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 536c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 537c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 538c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 539c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 540c9cafcc8SShahaf Shuler } 541c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 5427acf894dSStephen Hurd } 543f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 544f54fe5eeSStephen Hurd continue; 545f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 546af75078fSIntel } 547af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 548af75078fSIntel nb_cfg_lcores = nb_lcores; 549af75078fSIntel nb_fwd_lcores = 1; 550af75078fSIntel } 551af75078fSIntel 552af75078fSIntel static void 553af75078fSIntel set_def_peer_eth_addrs(void) 554af75078fSIntel { 555af75078fSIntel portid_t i; 556af75078fSIntel 557af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 558af75078fSIntel peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; 559af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 560af75078fSIntel } 561af75078fSIntel } 562af75078fSIntel 563af75078fSIntel static void 564af75078fSIntel set_default_fwd_ports_config(void) 565af75078fSIntel { 566af75078fSIntel portid_t pt_id; 56765a7360cSMatan Azrad int i = 0; 568af75078fSIntel 569effdb8bbSPhil Yang RTE_ETH_FOREACH_DEV(pt_id) { 57065a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 571af75078fSIntel 572effdb8bbSPhil Yang /* Update sockets info according to 
the attached device */ 573effdb8bbSPhil Yang int socket_id = rte_eth_dev_socket_id(pt_id); 574effdb8bbSPhil Yang if (socket_id >= 0 && new_socket_id(socket_id)) { 575effdb8bbSPhil Yang if (num_sockets >= RTE_MAX_NUMA_NODES) { 576effdb8bbSPhil Yang rte_exit(EXIT_FAILURE, 577effdb8bbSPhil Yang "Total sockets greater than %u\n", 578effdb8bbSPhil Yang RTE_MAX_NUMA_NODES); 579effdb8bbSPhil Yang } 580effdb8bbSPhil Yang socket_ids[num_sockets++] = socket_id; 581effdb8bbSPhil Yang } 582effdb8bbSPhil Yang } 583effdb8bbSPhil Yang 584af75078fSIntel nb_cfg_ports = nb_ports; 585af75078fSIntel nb_fwd_ports = nb_ports; 586af75078fSIntel } 587af75078fSIntel 588af75078fSIntel void 589af75078fSIntel set_def_fwd_config(void) 590af75078fSIntel { 591af75078fSIntel set_default_fwd_lcores_config(); 592af75078fSIntel set_def_peer_eth_addrs(); 593af75078fSIntel set_default_fwd_ports_config(); 594af75078fSIntel } 595af75078fSIntel 596c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */ 597c7f5dba7SAnatoly Burakov static int 598c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) 599c7f5dba7SAnatoly Burakov { 600c7f5dba7SAnatoly Burakov unsigned int n_pages, mbuf_per_pg, leftover; 601c7f5dba7SAnatoly Burakov uint64_t total_mem, mbuf_mem, obj_sz; 602c7f5dba7SAnatoly Burakov 603c7f5dba7SAnatoly Burakov /* there is no good way to predict how much space the mempool will 604c7f5dba7SAnatoly Burakov * occupy because it will allocate chunks on the fly, and some of those 605c7f5dba7SAnatoly Burakov * will come from default DPDK memory while some will come from our 606c7f5dba7SAnatoly Burakov * external memory, so just assume 128MB will be enough for everyone. 
607c7f5dba7SAnatoly Burakov */ 608c7f5dba7SAnatoly Burakov uint64_t hdr_mem = 128 << 20; 609c7f5dba7SAnatoly Burakov 610c7f5dba7SAnatoly Burakov /* account for possible non-contiguousness */ 611c7f5dba7SAnatoly Burakov obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); 612c7f5dba7SAnatoly Burakov if (obj_sz > pgsz) { 613c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); 614c7f5dba7SAnatoly Burakov return -1; 615c7f5dba7SAnatoly Burakov } 616c7f5dba7SAnatoly Burakov 617c7f5dba7SAnatoly Burakov mbuf_per_pg = pgsz / obj_sz; 618c7f5dba7SAnatoly Burakov leftover = (nb_mbufs % mbuf_per_pg) > 0; 619c7f5dba7SAnatoly Burakov n_pages = (nb_mbufs / mbuf_per_pg) + leftover; 620c7f5dba7SAnatoly Burakov 621c7f5dba7SAnatoly Burakov mbuf_mem = n_pages * pgsz; 622c7f5dba7SAnatoly Burakov 623c7f5dba7SAnatoly Burakov total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); 624c7f5dba7SAnatoly Burakov 625c7f5dba7SAnatoly Burakov if (total_mem > SIZE_MAX) { 626c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Memory size too big\n"); 627c7f5dba7SAnatoly Burakov return -1; 628c7f5dba7SAnatoly Burakov } 629c7f5dba7SAnatoly Burakov *out = (size_t)total_mem; 630c7f5dba7SAnatoly Burakov 631c7f5dba7SAnatoly Burakov return 0; 632c7f5dba7SAnatoly Burakov } 633c7f5dba7SAnatoly Burakov 634c7f5dba7SAnatoly Burakov static inline uint32_t 635c7f5dba7SAnatoly Burakov bsf64(uint64_t v) 636c7f5dba7SAnatoly Burakov { 637c7f5dba7SAnatoly Burakov return (uint32_t)__builtin_ctzll(v); 638c7f5dba7SAnatoly Burakov } 639c7f5dba7SAnatoly Burakov 640c7f5dba7SAnatoly Burakov static inline uint32_t 641c7f5dba7SAnatoly Burakov log2_u64(uint64_t v) 642c7f5dba7SAnatoly Burakov { 643c7f5dba7SAnatoly Burakov if (v == 0) 644c7f5dba7SAnatoly Burakov return 0; 645c7f5dba7SAnatoly Burakov v = rte_align64pow2(v); 646c7f5dba7SAnatoly Burakov return bsf64(v); 647c7f5dba7SAnatoly Burakov } 648c7f5dba7SAnatoly Burakov 649c7f5dba7SAnatoly Burakov static int 650c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t 
page_sz) 651c7f5dba7SAnatoly Burakov { 652c7f5dba7SAnatoly Burakov /* as per mmap() manpage, all page sizes are log2 of page size 653c7f5dba7SAnatoly Burakov * shifted by MAP_HUGE_SHIFT 654c7f5dba7SAnatoly Burakov */ 655c7f5dba7SAnatoly Burakov int log2 = log2_u64(page_sz); 656c7f5dba7SAnatoly Burakov 657c7f5dba7SAnatoly Burakov return (log2 << HUGE_SHIFT); 658c7f5dba7SAnatoly Burakov } 659c7f5dba7SAnatoly Burakov 660c7f5dba7SAnatoly Burakov static void * 661c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge) 662c7f5dba7SAnatoly Burakov { 663c7f5dba7SAnatoly Burakov void *addr; 664c7f5dba7SAnatoly Burakov int flags; 665c7f5dba7SAnatoly Burakov 666c7f5dba7SAnatoly Burakov /* allocate anonymous hugepages */ 667c7f5dba7SAnatoly Burakov flags = MAP_ANONYMOUS | MAP_PRIVATE; 668c7f5dba7SAnatoly Burakov if (huge) 669c7f5dba7SAnatoly Burakov flags |= HUGE_FLAG | pagesz_flags(pgsz); 670c7f5dba7SAnatoly Burakov 671c7f5dba7SAnatoly Burakov addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); 672c7f5dba7SAnatoly Burakov if (addr == MAP_FAILED) 673c7f5dba7SAnatoly Burakov return NULL; 674c7f5dba7SAnatoly Burakov 675c7f5dba7SAnatoly Burakov return addr; 676c7f5dba7SAnatoly Burakov } 677c7f5dba7SAnatoly Burakov 678c7f5dba7SAnatoly Burakov struct extmem_param { 679c7f5dba7SAnatoly Burakov void *addr; 680c7f5dba7SAnatoly Burakov size_t len; 681c7f5dba7SAnatoly Burakov size_t pgsz; 682c7f5dba7SAnatoly Burakov rte_iova_t *iova_table; 683c7f5dba7SAnatoly Burakov unsigned int iova_table_len; 684c7f5dba7SAnatoly Burakov }; 685c7f5dba7SAnatoly Burakov 686c7f5dba7SAnatoly Burakov static int 687c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, 688c7f5dba7SAnatoly Burakov bool huge) 689c7f5dba7SAnatoly Burakov { 690c7f5dba7SAnatoly Burakov uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ 691c7f5dba7SAnatoly Burakov RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ 692c7f5dba7SAnatoly 
Burakov unsigned int cur_page, n_pages, pgsz_idx; 693c7f5dba7SAnatoly Burakov size_t mem_sz, cur_pgsz; 694c7f5dba7SAnatoly Burakov rte_iova_t *iovas = NULL; 695c7f5dba7SAnatoly Burakov void *addr; 696c7f5dba7SAnatoly Burakov int ret; 697c7f5dba7SAnatoly Burakov 698c7f5dba7SAnatoly Burakov for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { 699c7f5dba7SAnatoly Burakov /* skip anything that is too big */ 700c7f5dba7SAnatoly Burakov if (pgsizes[pgsz_idx] > SIZE_MAX) 701c7f5dba7SAnatoly Burakov continue; 702c7f5dba7SAnatoly Burakov 703c7f5dba7SAnatoly Burakov cur_pgsz = pgsizes[pgsz_idx]; 704c7f5dba7SAnatoly Burakov 705c7f5dba7SAnatoly Burakov /* if we were told not to allocate hugepages, override */ 706c7f5dba7SAnatoly Burakov if (!huge) 707c7f5dba7SAnatoly Burakov cur_pgsz = sysconf(_SC_PAGESIZE); 708c7f5dba7SAnatoly Burakov 709c7f5dba7SAnatoly Burakov ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); 710c7f5dba7SAnatoly Burakov if (ret < 0) { 711c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); 712c7f5dba7SAnatoly Burakov return -1; 713c7f5dba7SAnatoly Burakov } 714c7f5dba7SAnatoly Burakov 715c7f5dba7SAnatoly Burakov /* allocate our memory */ 716c7f5dba7SAnatoly Burakov addr = alloc_mem(mem_sz, cur_pgsz, huge); 717c7f5dba7SAnatoly Burakov 718c7f5dba7SAnatoly Burakov /* if we couldn't allocate memory with a specified page size, 719c7f5dba7SAnatoly Burakov * that doesn't mean we can't do it with other page sizes, so 720c7f5dba7SAnatoly Burakov * try another one. 
721c7f5dba7SAnatoly Burakov */ 722c7f5dba7SAnatoly Burakov if (addr == NULL) 723c7f5dba7SAnatoly Burakov continue; 724c7f5dba7SAnatoly Burakov 725c7f5dba7SAnatoly Burakov /* store IOVA addresses for every page in this memory area */ 726c7f5dba7SAnatoly Burakov n_pages = mem_sz / cur_pgsz; 727c7f5dba7SAnatoly Burakov 728c7f5dba7SAnatoly Burakov iovas = malloc(sizeof(*iovas) * n_pages); 729c7f5dba7SAnatoly Burakov 730c7f5dba7SAnatoly Burakov if (iovas == NULL) { 731c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n"); 732c7f5dba7SAnatoly Burakov goto fail; 733c7f5dba7SAnatoly Burakov } 734c7f5dba7SAnatoly Burakov /* lock memory if it's not huge pages */ 735c7f5dba7SAnatoly Burakov if (!huge) 736c7f5dba7SAnatoly Burakov mlock(addr, mem_sz); 737c7f5dba7SAnatoly Burakov 738c7f5dba7SAnatoly Burakov /* populate IOVA addresses */ 739c7f5dba7SAnatoly Burakov for (cur_page = 0; cur_page < n_pages; cur_page++) { 740c7f5dba7SAnatoly Burakov rte_iova_t iova; 741c7f5dba7SAnatoly Burakov size_t offset; 742c7f5dba7SAnatoly Burakov void *cur; 743c7f5dba7SAnatoly Burakov 744c7f5dba7SAnatoly Burakov offset = cur_pgsz * cur_page; 745c7f5dba7SAnatoly Burakov cur = RTE_PTR_ADD(addr, offset); 746c7f5dba7SAnatoly Burakov 747c7f5dba7SAnatoly Burakov /* touch the page before getting its IOVA */ 748c7f5dba7SAnatoly Burakov *(volatile char *)cur = 0; 749c7f5dba7SAnatoly Burakov 750c7f5dba7SAnatoly Burakov iova = rte_mem_virt2iova(cur); 751c7f5dba7SAnatoly Burakov 752c7f5dba7SAnatoly Burakov iovas[cur_page] = iova; 753c7f5dba7SAnatoly Burakov } 754c7f5dba7SAnatoly Burakov 755c7f5dba7SAnatoly Burakov break; 756c7f5dba7SAnatoly Burakov } 757c7f5dba7SAnatoly Burakov /* if we couldn't allocate anything */ 758c7f5dba7SAnatoly Burakov if (iovas == NULL) 759c7f5dba7SAnatoly Burakov return -1; 760c7f5dba7SAnatoly Burakov 761c7f5dba7SAnatoly Burakov param->addr = addr; 762c7f5dba7SAnatoly Burakov param->len = mem_sz; 763c7f5dba7SAnatoly Burakov param->pgsz = 
cur_pgsz; 764c7f5dba7SAnatoly Burakov param->iova_table = iovas; 765c7f5dba7SAnatoly Burakov param->iova_table_len = n_pages; 766c7f5dba7SAnatoly Burakov 767c7f5dba7SAnatoly Burakov return 0; 768c7f5dba7SAnatoly Burakov fail: 769c7f5dba7SAnatoly Burakov if (iovas) 770c7f5dba7SAnatoly Burakov free(iovas); 771c7f5dba7SAnatoly Burakov if (addr) 772c7f5dba7SAnatoly Burakov munmap(addr, mem_sz); 773c7f5dba7SAnatoly Burakov 774c7f5dba7SAnatoly Burakov return -1; 775c7f5dba7SAnatoly Burakov } 776c7f5dba7SAnatoly Burakov 777c7f5dba7SAnatoly Burakov static int 778c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge) 779c7f5dba7SAnatoly Burakov { 780c7f5dba7SAnatoly Burakov struct extmem_param param; 781c7f5dba7SAnatoly Burakov int socket_id, ret; 782c7f5dba7SAnatoly Burakov 783c7f5dba7SAnatoly Burakov memset(¶m, 0, sizeof(param)); 784c7f5dba7SAnatoly Burakov 785c7f5dba7SAnatoly Burakov /* check if our heap exists */ 786c7f5dba7SAnatoly Burakov socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 787c7f5dba7SAnatoly Burakov if (socket_id < 0) { 788c7f5dba7SAnatoly Burakov /* create our heap */ 789c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME); 790c7f5dba7SAnatoly Burakov if (ret < 0) { 791c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create heap\n"); 792c7f5dba7SAnatoly Burakov return -1; 793c7f5dba7SAnatoly Burakov } 794c7f5dba7SAnatoly Burakov } 795c7f5dba7SAnatoly Burakov 796c7f5dba7SAnatoly Burakov ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge); 797c7f5dba7SAnatoly Burakov if (ret < 0) { 798c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create memory area\n"); 799c7f5dba7SAnatoly Burakov return -1; 800c7f5dba7SAnatoly Burakov } 801c7f5dba7SAnatoly Burakov 802c7f5dba7SAnatoly Burakov /* we now have a valid memory area, so add it to heap */ 803c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME, 804c7f5dba7SAnatoly Burakov param.addr, param.len, param.iova_table, 
805c7f5dba7SAnatoly Burakov param.iova_table_len, param.pgsz); 806c7f5dba7SAnatoly Burakov 807c7f5dba7SAnatoly Burakov /* when using VFIO, memory is automatically mapped for DMA by EAL */ 808c7f5dba7SAnatoly Burakov 809c7f5dba7SAnatoly Burakov /* not needed any more */ 810c7f5dba7SAnatoly Burakov free(param.iova_table); 811c7f5dba7SAnatoly Burakov 812c7f5dba7SAnatoly Burakov if (ret < 0) { 813c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot add memory to heap\n"); 814c7f5dba7SAnatoly Burakov munmap(param.addr, param.len); 815c7f5dba7SAnatoly Burakov return -1; 816c7f5dba7SAnatoly Burakov } 817c7f5dba7SAnatoly Burakov 818c7f5dba7SAnatoly Burakov /* success */ 819c7f5dba7SAnatoly Burakov 820c7f5dba7SAnatoly Burakov TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n", 821c7f5dba7SAnatoly Burakov param.len >> 20); 822c7f5dba7SAnatoly Burakov 823c7f5dba7SAnatoly Burakov return 0; 824c7f5dba7SAnatoly Burakov } 825c7f5dba7SAnatoly Burakov 826af75078fSIntel /* 827af75078fSIntel * Configuration initialisation done once at init time. 
828af75078fSIntel */ 829af75078fSIntel static void 830af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 831af75078fSIntel unsigned int socket_id) 832af75078fSIntel { 833af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 834bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 835af75078fSIntel uint32_t mb_size; 836af75078fSIntel 837dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 838af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 839148f963fSBruce Richardson 840285fd101SOlivier Matz TESTPMD_LOG(INFO, 841d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 842d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 843d1eb542eSOlivier Matz 844c7f5dba7SAnatoly Burakov switch (mp_alloc_type) { 845c7f5dba7SAnatoly Burakov case MP_ALLOC_NATIVE: 846c7f5dba7SAnatoly Burakov { 847c7f5dba7SAnatoly Burakov /* wrapper to rte_mempool_create() */ 848c7f5dba7SAnatoly Burakov TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 849c7f5dba7SAnatoly Burakov rte_mbuf_best_mempool_ops()); 850c7f5dba7SAnatoly Burakov rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 851c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, socket_id); 852c7f5dba7SAnatoly Burakov break; 853c7f5dba7SAnatoly Burakov } 854c7f5dba7SAnatoly Burakov case MP_ALLOC_ANON: 855c7f5dba7SAnatoly Burakov { 856b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 857c7f5dba7SAnatoly Burakov mb_size, (unsigned int) mb_mempool_cache, 858148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 859148f963fSBruce Richardson socket_id, 0); 86024427bb9SOlivier Matz if (rte_mp == NULL) 86124427bb9SOlivier Matz goto err; 862b19a0c75SOlivier Matz 863b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 864b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 865b19a0c75SOlivier Matz rte_mp = NULL; 86624427bb9SOlivier Matz goto err; 
867b19a0c75SOlivier Matz } 868b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 869b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 870c7f5dba7SAnatoly Burakov break; 871c7f5dba7SAnatoly Burakov } 872c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM: 873c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM_HUGE: 874c7f5dba7SAnatoly Burakov { 875c7f5dba7SAnatoly Burakov int heap_socket; 876c7f5dba7SAnatoly Burakov bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; 877c7f5dba7SAnatoly Burakov 878c7f5dba7SAnatoly Burakov if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) 879c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not create external memory\n"); 880c7f5dba7SAnatoly Burakov 881c7f5dba7SAnatoly Burakov heap_socket = 882c7f5dba7SAnatoly Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 883c7f5dba7SAnatoly Burakov if (heap_socket < 0) 884c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 885c7f5dba7SAnatoly Burakov 8860e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 8870e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 888ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 889c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 890c7f5dba7SAnatoly Burakov heap_socket); 891c7f5dba7SAnatoly Burakov break; 892c7f5dba7SAnatoly Burakov } 893c7f5dba7SAnatoly Burakov default: 894c7f5dba7SAnatoly Burakov { 895c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 896c7f5dba7SAnatoly Burakov } 897bece7b6cSChristian Ehrhardt } 898148f963fSBruce Richardson 89924427bb9SOlivier Matz err: 900af75078fSIntel if (rte_mp == NULL) { 901d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 902d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 903d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 904148f963fSBruce Richardson } else if (verbose_level > 0) { 905591a9d79SStephen Hemminger 
rte_mempool_dump(stdout, rte_mp); 906af75078fSIntel } 907af75078fSIntel } 908af75078fSIntel 90920a0286fSLiu Xiaofeng /* 91020a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 91120a0286fSLiu Xiaofeng * if valid, return 0, else return -1 91220a0286fSLiu Xiaofeng */ 91320a0286fSLiu Xiaofeng static int 91420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 91520a0286fSLiu Xiaofeng { 91620a0286fSLiu Xiaofeng static int warning_once = 0; 91720a0286fSLiu Xiaofeng 918c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 91920a0286fSLiu Xiaofeng if (!warning_once && numa_support) 92020a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 92120a0286fSLiu Xiaofeng " using --port-numa-config and" 92220a0286fSLiu Xiaofeng " --ring-numa-config parameters along with" 92320a0286fSLiu Xiaofeng " --numa.\n"); 92420a0286fSLiu Xiaofeng warning_once = 1; 92520a0286fSLiu Xiaofeng return -1; 92620a0286fSLiu Xiaofeng } 92720a0286fSLiu Xiaofeng return 0; 92820a0286fSLiu Xiaofeng } 92920a0286fSLiu Xiaofeng 9303f7311baSWei Dai /* 9313f7311baSWei Dai * Get the allowed maximum number of RX queues. 9323f7311baSWei Dai * *pid return the port id which has minimal value of 9333f7311baSWei Dai * max_rx_queues in all ports. 
9343f7311baSWei Dai */ 9353f7311baSWei Dai queueid_t 9363f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 9373f7311baSWei Dai { 9383f7311baSWei Dai queueid_t allowed_max_rxq = MAX_QUEUE_ID; 9393f7311baSWei Dai portid_t pi; 9403f7311baSWei Dai struct rte_eth_dev_info dev_info; 9413f7311baSWei Dai 9423f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 9433f7311baSWei Dai rte_eth_dev_info_get(pi, &dev_info); 9443f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 9453f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 9463f7311baSWei Dai *pid = pi; 9473f7311baSWei Dai } 9483f7311baSWei Dai } 9493f7311baSWei Dai return allowed_max_rxq; 9503f7311baSWei Dai } 9513f7311baSWei Dai 9523f7311baSWei Dai /* 9533f7311baSWei Dai * Check input rxq is valid or not. 9543f7311baSWei Dai * If input rxq is not greater than any of maximum number 9553f7311baSWei Dai * of RX queues of all ports, it is valid. 9563f7311baSWei Dai * if valid, return 0, else return -1 9573f7311baSWei Dai */ 9583f7311baSWei Dai int 9593f7311baSWei Dai check_nb_rxq(queueid_t rxq) 9603f7311baSWei Dai { 9613f7311baSWei Dai queueid_t allowed_max_rxq; 9623f7311baSWei Dai portid_t pid = 0; 9633f7311baSWei Dai 9643f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 9653f7311baSWei Dai if (rxq > allowed_max_rxq) { 9663f7311baSWei Dai printf("Fail: input rxq (%u) can't be greater " 9673f7311baSWei Dai "than max_rx_queues (%u) of port %u\n", 9683f7311baSWei Dai rxq, 9693f7311baSWei Dai allowed_max_rxq, 9703f7311baSWei Dai pid); 9713f7311baSWei Dai return -1; 9723f7311baSWei Dai } 9733f7311baSWei Dai return 0; 9743f7311baSWei Dai } 9753f7311baSWei Dai 97636db4f6cSWei Dai /* 97736db4f6cSWei Dai * Get the allowed maximum number of TX queues. 97836db4f6cSWei Dai * *pid return the port id which has minimal value of 97936db4f6cSWei Dai * max_tx_queues in all ports. 
98036db4f6cSWei Dai */ 98136db4f6cSWei Dai queueid_t 98236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 98336db4f6cSWei Dai { 98436db4f6cSWei Dai queueid_t allowed_max_txq = MAX_QUEUE_ID; 98536db4f6cSWei Dai portid_t pi; 98636db4f6cSWei Dai struct rte_eth_dev_info dev_info; 98736db4f6cSWei Dai 98836db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 98936db4f6cSWei Dai rte_eth_dev_info_get(pi, &dev_info); 99036db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 99136db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 99236db4f6cSWei Dai *pid = pi; 99336db4f6cSWei Dai } 99436db4f6cSWei Dai } 99536db4f6cSWei Dai return allowed_max_txq; 99636db4f6cSWei Dai } 99736db4f6cSWei Dai 99836db4f6cSWei Dai /* 99936db4f6cSWei Dai * Check input txq is valid or not. 100036db4f6cSWei Dai * If input txq is not greater than any of maximum number 100136db4f6cSWei Dai * of TX queues of all ports, it is valid. 100236db4f6cSWei Dai * if valid, return 0, else return -1 100336db4f6cSWei Dai */ 100436db4f6cSWei Dai int 100536db4f6cSWei Dai check_nb_txq(queueid_t txq) 100636db4f6cSWei Dai { 100736db4f6cSWei Dai queueid_t allowed_max_txq; 100836db4f6cSWei Dai portid_t pid = 0; 100936db4f6cSWei Dai 101036db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 101136db4f6cSWei Dai if (txq > allowed_max_txq) { 101236db4f6cSWei Dai printf("Fail: input txq (%u) can't be greater " 101336db4f6cSWei Dai "than max_tx_queues (%u) of port %u\n", 101436db4f6cSWei Dai txq, 101536db4f6cSWei Dai allowed_max_txq, 101636db4f6cSWei Dai pid); 101736db4f6cSWei Dai return -1; 101836db4f6cSWei Dai } 101936db4f6cSWei Dai return 0; 102036db4f6cSWei Dai } 102136db4f6cSWei Dai 1022af75078fSIntel static void 1023af75078fSIntel init_config(void) 1024af75078fSIntel { 1025ce8d5614SIntel portid_t pid; 1026af75078fSIntel struct rte_port *port; 1027af75078fSIntel struct rte_mempool *mbp; 1028af75078fSIntel unsigned int nb_mbuf_per_pool; 1029af75078fSIntel lcoreid_t lc_id; 10307acf894dSStephen Hurd uint8_t 
port_per_socket[RTE_MAX_NUMA_NODES]; 1031b7091f1dSJiayu Hu struct rte_gro_param gro_param; 103252f38a20SJiayu Hu uint32_t gso_types; 1033c73a9071SWei Dai int k; 1034af75078fSIntel 10357acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 1036487f9a59SYulong Pei 1037af75078fSIntel /* Configuration of logical cores. */ 1038af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1039af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1040fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1041af75078fSIntel if (fwd_lcores == NULL) { 1042ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1043ce8d5614SIntel "failed\n", nb_lcores); 1044af75078fSIntel } 1045af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1046af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1047af75078fSIntel sizeof(struct fwd_lcore), 1048fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1049af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1050ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1051ce8d5614SIntel "failed\n"); 1052af75078fSIntel } 1053af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1054af75078fSIntel } 1055af75078fSIntel 10567d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1057ce8d5614SIntel port = &ports[pid]; 10588b9bd0efSMoti Haimovsky /* Apply default TxRx configuration for all ports */ 1059fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 1060384161e0SShahaf Shuler port->dev_conf.rxmode = rx_mode; 1061ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 10627c45f6c0SFerruh Yigit 106307e5f7bdSShahaf Shuler if (!(port->dev_info.tx_offload_capa & 106407e5f7bdSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 106507e5f7bdSShahaf Shuler port->dev_conf.txmode.offloads &= 106607e5f7bdSShahaf Shuler ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 1067c18feafaSDekel Peled if (!(port->dev_info.tx_offload_capa & 1068c18feafaSDekel Peled DEV_TX_OFFLOAD_MATCH_METADATA)) 1069c18feafaSDekel 
Peled port->dev_conf.txmode.offloads &= 1070c18feafaSDekel Peled ~DEV_TX_OFFLOAD_MATCH_METADATA; 1071b6ea6408SIntel if (numa_support) { 1072b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 1073b6ea6408SIntel port_per_socket[port_numa[pid]]++; 1074b6ea6408SIntel else { 1075b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 107620a0286fSLiu Xiaofeng 107729841336SPhil Yang /* 107829841336SPhil Yang * if socket_id is invalid, 107929841336SPhil Yang * set to the first available socket. 108029841336SPhil Yang */ 108120a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 108229841336SPhil Yang socket_id = socket_ids[0]; 1083b6ea6408SIntel port_per_socket[socket_id]++; 1084b6ea6408SIntel } 1085b6ea6408SIntel } 1086b6ea6408SIntel 1087c73a9071SWei Dai /* Apply Rx offloads configuration */ 1088c73a9071SWei Dai for (k = 0; k < port->dev_info.max_rx_queues; k++) 1089c73a9071SWei Dai port->rx_conf[k].offloads = 1090c73a9071SWei Dai port->dev_conf.rxmode.offloads; 1091c73a9071SWei Dai /* Apply Tx offloads configuration */ 1092c73a9071SWei Dai for (k = 0; k < port->dev_info.max_tx_queues; k++) 1093c73a9071SWei Dai port->tx_conf[k].offloads = 1094c73a9071SWei Dai port->dev_conf.txmode.offloads; 1095c73a9071SWei Dai 1096ce8d5614SIntel /* set flag to initialize port/queue */ 1097ce8d5614SIntel port->need_reconfig = 1; 1098ce8d5614SIntel port->need_reconfig_queues = 1; 1099c18feafaSDekel Peled port->tx_metadata = 0; 1100ce8d5614SIntel } 1101ce8d5614SIntel 11023ab64341SOlivier Matz /* 11033ab64341SOlivier Matz * Create pools of mbuf. 11043ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 11053ab64341SOlivier Matz * socket 0 memory by default. 11063ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 11073ab64341SOlivier Matz * 11083ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 11093ab64341SOlivier Matz * nb_txd can be configured at run time. 
11103ab64341SOlivier Matz */ 11113ab64341SOlivier Matz if (param_total_num_mbufs) 11123ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 11133ab64341SOlivier Matz else { 11143ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 11153ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 11163ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 11173ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 11183ab64341SOlivier Matz } 11193ab64341SOlivier Matz 1120b6ea6408SIntel if (numa_support) { 1121b6ea6408SIntel uint8_t i; 1122ce8d5614SIntel 1123c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 1124c9cafcc8SShahaf Shuler mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1125c9cafcc8SShahaf Shuler socket_ids[i]); 11263ab64341SOlivier Matz } else { 11273ab64341SOlivier Matz if (socket_num == UMA_NO_CONFIG) 11283ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); 11293ab64341SOlivier Matz else 11303ab64341SOlivier Matz mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 11313ab64341SOlivier Matz socket_num); 11323ab64341SOlivier Matz } 1133b6ea6408SIntel 1134b6ea6408SIntel init_port_config(); 11355886ae07SAdrien Mazarguil 113652f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1137aaacd052SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 11385886ae07SAdrien Mazarguil /* 11395886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
11405886ae07SAdrien Mazarguil */ 11415886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 11428fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 11438fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 11448fd8bebcSAdrien Mazarguil 11455886ae07SAdrien Mazarguil if (mbp == NULL) 11465886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 11475886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 114852f38a20SJiayu Hu /* initialize GSO context */ 114952f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 115052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 115152f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 115252f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - 115352f38a20SJiayu Hu ETHER_CRC_LEN; 115452f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 11555886ae07SAdrien Mazarguil } 11565886ae07SAdrien Mazarguil 1157ce8d5614SIntel /* Configuration of packet forwarding streams. */ 1158ce8d5614SIntel if (init_fwd_streams() < 0) 1159ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 11600c0db76fSBernard Iremonger 11610c0db76fSBernard Iremonger fwd_config_setup(); 1162b7091f1dSJiayu Hu 1163b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1164b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1165b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1166b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1167b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1168b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1169b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1170b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1171b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1172b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1173b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1174b7091f1dSJiayu Hu } 1175b7091f1dSJiayu Hu } 11760ad778b3SJasvinder Singh 
11770ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC 11780ad778b3SJasvinder Singh if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 11790ad778b3SJasvinder Singh RTE_ETH_FOREACH_DEV(pid) { 11800ad778b3SJasvinder Singh port = &ports[pid]; 11810ad778b3SJasvinder Singh const char *driver = port->dev_info.driver_name; 11820ad778b3SJasvinder Singh 11830ad778b3SJasvinder Singh if (strcmp(driver, "net_softnic") == 0) 11840ad778b3SJasvinder Singh port->softport.fwd_lcore_arg = fwd_lcores; 11850ad778b3SJasvinder Singh } 11860ad778b3SJasvinder Singh } 11870ad778b3SJasvinder Singh #endif 11880ad778b3SJasvinder Singh 1189ce8d5614SIntel } 1190ce8d5614SIntel 11912950a769SDeclan Doherty 11922950a769SDeclan Doherty void 1193a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 11942950a769SDeclan Doherty { 11952950a769SDeclan Doherty struct rte_port *port; 11962950a769SDeclan Doherty 11972950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 11982950a769SDeclan Doherty port = &ports[new_port_id]; 11992950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 12002950a769SDeclan Doherty 12012950a769SDeclan Doherty /* set flag to initialize port/queue */ 12022950a769SDeclan Doherty port->need_reconfig = 1; 12032950a769SDeclan Doherty port->need_reconfig_queues = 1; 1204a21d5a4bSDeclan Doherty port->socket_id = socket_id; 12052950a769SDeclan Doherty 12062950a769SDeclan Doherty init_port_config(); 12072950a769SDeclan Doherty } 12082950a769SDeclan Doherty 12092950a769SDeclan Doherty 1210ce8d5614SIntel int 1211ce8d5614SIntel init_fwd_streams(void) 1212ce8d5614SIntel { 1213ce8d5614SIntel portid_t pid; 1214ce8d5614SIntel struct rte_port *port; 1215ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 12165a8fb55cSReshma Pattan queueid_t q; 1217ce8d5614SIntel 1218ce8d5614SIntel /* set socket id according to numa or not */ 12197d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1220ce8d5614SIntel port = &ports[pid]; 
1221ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 1222ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 1223ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 1224ce8d5614SIntel port->dev_info.max_rx_queues); 1225ce8d5614SIntel return -1; 1226ce8d5614SIntel } 1227ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 1228ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 1229ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 1230ce8d5614SIntel port->dev_info.max_tx_queues); 1231ce8d5614SIntel return -1; 1232ce8d5614SIntel } 123320a0286fSLiu Xiaofeng if (numa_support) { 123420a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 123520a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 123620a0286fSLiu Xiaofeng else { 1237b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 123820a0286fSLiu Xiaofeng 123929841336SPhil Yang /* 124029841336SPhil Yang * if socket_id is invalid, 124129841336SPhil Yang * set to the first available socket. 124229841336SPhil Yang */ 124320a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 124429841336SPhil Yang port->socket_id = socket_ids[0]; 124520a0286fSLiu Xiaofeng } 124620a0286fSLiu Xiaofeng } 1247b6ea6408SIntel else { 1248b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1249af75078fSIntel port->socket_id = 0; 1250b6ea6408SIntel else 1251b6ea6408SIntel port->socket_id = socket_num; 1252b6ea6408SIntel } 1253af75078fSIntel } 1254af75078fSIntel 12555a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 12565a8fb55cSReshma Pattan if (q == 0) { 12575a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 12585a8fb55cSReshma Pattan return -1; 12595a8fb55cSReshma Pattan } 12605a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1261ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1262ce8d5614SIntel return 0; 1263ce8d5614SIntel /* clear the old */ 1264ce8d5614SIntel if (fwd_streams != NULL) { 1265ce8d5614SIntel for (sm_id = 0; sm_id < 
nb_fwd_streams; sm_id++) { 1266ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1267ce8d5614SIntel continue; 1268ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1269ce8d5614SIntel fwd_streams[sm_id] = NULL; 1270af75078fSIntel } 1271ce8d5614SIntel rte_free(fwd_streams); 1272ce8d5614SIntel fwd_streams = NULL; 1273ce8d5614SIntel } 1274ce8d5614SIntel 1275ce8d5614SIntel /* init new */ 1276ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 12771f84c469SMatan Azrad if (nb_fwd_streams) { 1278ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 12791f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 12801f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1281ce8d5614SIntel if (fwd_streams == NULL) 12821f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 12831f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 12841f84c469SMatan Azrad nb_fwd_streams); 1285ce8d5614SIntel 1286af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 12871f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 12881f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 12891f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1290ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 12911f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 12921f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 12931f84c469SMatan Azrad } 1294af75078fSIntel } 1295ce8d5614SIntel 1296ce8d5614SIntel return 0; 1297af75078fSIntel } 1298af75078fSIntel 1299af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1300af75078fSIntel static void 1301af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1302af75078fSIntel { 1303af75078fSIntel unsigned int total_burst; 1304af75078fSIntel unsigned int nb_burst; 1305af75078fSIntel unsigned int burst_stats[3]; 1306af75078fSIntel uint16_t pktnb_stats[3]; 1307af75078fSIntel uint16_t nb_pkt; 1308af75078fSIntel int burst_percent[3]; 1309af75078fSIntel 1310af75078fSIntel /* 1311af75078fSIntel * First compute 
the total number of packet bursts and the 1312af75078fSIntel * two highest numbers of bursts of the same number of packets. 1313af75078fSIntel */ 1314af75078fSIntel total_burst = 0; 1315af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 1316af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 1317af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1318af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 1319af75078fSIntel if (nb_burst == 0) 1320af75078fSIntel continue; 1321af75078fSIntel total_burst += nb_burst; 1322af75078fSIntel if (nb_burst > burst_stats[0]) { 1323af75078fSIntel burst_stats[1] = burst_stats[0]; 1324af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 1325af75078fSIntel burst_stats[0] = nb_burst; 1326af75078fSIntel pktnb_stats[0] = nb_pkt; 1327fe613657SDaniel Shelepov } else if (nb_burst > burst_stats[1]) { 1328fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1329fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 1330af75078fSIntel } 1331af75078fSIntel } 1332af75078fSIntel if (total_burst == 0) 1333af75078fSIntel return; 1334af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 1335af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 1336af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 1337af75078fSIntel if (burst_stats[0] == total_burst) { 1338af75078fSIntel printf("]\n"); 1339af75078fSIntel return; 1340af75078fSIntel } 1341af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 1342af75078fSIntel printf(" + %d%% of %d pkts]\n", 1343af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 1344af75078fSIntel return; 1345af75078fSIntel } 1346af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 1347af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 1348af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 1349af75078fSIntel printf(" + %d%% of others]\n", 100 - 
			burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

/*
 * Display the forwarding statistics accumulated for one port:
 * RX/TX packet and drop counters, bad-checksum counters when the csum
 * engine is active, and the per-queue stats registers when queue-stats
 * mapping is enabled on the port.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Two layouts: left-aligned when no queue-stats mapping is used,
	 * right-aligned (wide columns) otherwise. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf(" RX-error:%"PRIu64"\n", stats->ierrors);
			printf(" RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

/*
 * Display the statistics of one forwarding stream, i.e. one
 * RX port/queue -> TX port/queue pair. Streams that saw no traffic
 * and dropped nothing are silently skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u Rx- bad outer L4 checksum: %-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

/*
 * Drain the RX queues of all forwarding ports so a new forwarding run
 * does not start with stale packets left over from a previous one.
 * Two passes are made, 10 ms apart, to catch in-flight packets.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd could get stuck in the do/while
				 * loop below if rte_eth_rx_burst() always
				 * returned a nonzero number of packets, so a
				 * 1 second timeout forces an exit.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

/*
 * Main forwarding loop of one logical core: repeatedly invoke the
 * packet forwarding callback on every stream assigned to this lcore
 * until the lcore context's "stopped" flag is raised. The lcore that
 * owns the bitrate/latency statistics additionally updates them once
 * per loop iteration (bitrate at most once per second).
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

/*
 * lcore entry point: run the currently configured forwarding engine
 * on this core until forwarding is stopped.
 */
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	/* Run on a copy whose "stopped" flag is already set, so the
	 * forwarding loop executes exactly one iteration. */
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 * - Setup per-port forwarding context.
1589af75078fSIntel * - launch logical cores with their forwarding configuration. 1590af75078fSIntel */ 1591af75078fSIntel static void 1592af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1593af75078fSIntel { 1594af75078fSIntel port_fwd_begin_t port_fwd_begin; 1595af75078fSIntel unsigned int i; 1596af75078fSIntel unsigned int lc_id; 1597af75078fSIntel int diag; 1598af75078fSIntel 1599af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1600af75078fSIntel if (port_fwd_begin != NULL) { 1601af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1602af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1603af75078fSIntel } 1604af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1605af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1606af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1607af75078fSIntel fwd_lcores[i]->stopped = 0; 1608af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1609af75078fSIntel fwd_lcores[i], lc_id); 1610af75078fSIntel if (diag != 0) 1611af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1612af75078fSIntel lc_id, diag); 1613af75078fSIntel } 1614af75078fSIntel } 1615af75078fSIntel } 1616af75078fSIntel 1617af75078fSIntel /* 161803ce2c53SMatan Azrad * Update the forward ports list. 
161903ce2c53SMatan Azrad */ 162003ce2c53SMatan Azrad void 162103ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid) 162203ce2c53SMatan Azrad { 162303ce2c53SMatan Azrad unsigned int i; 162403ce2c53SMatan Azrad unsigned int new_nb_fwd_ports = 0; 162503ce2c53SMatan Azrad int move = 0; 162603ce2c53SMatan Azrad 162703ce2c53SMatan Azrad for (i = 0; i < nb_fwd_ports; ++i) { 162803ce2c53SMatan Azrad if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) 162903ce2c53SMatan Azrad move = 1; 163003ce2c53SMatan Azrad else if (move) 163103ce2c53SMatan Azrad fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; 163203ce2c53SMatan Azrad else 163303ce2c53SMatan Azrad new_nb_fwd_ports++; 163403ce2c53SMatan Azrad } 163503ce2c53SMatan Azrad if (new_pid < RTE_MAX_ETHPORTS) 163603ce2c53SMatan Azrad fwd_ports_ids[new_nb_fwd_ports++] = new_pid; 163703ce2c53SMatan Azrad 163803ce2c53SMatan Azrad nb_fwd_ports = new_nb_fwd_ports; 163903ce2c53SMatan Azrad nb_cfg_ports = new_nb_fwd_ports; 164003ce2c53SMatan Azrad } 164103ce2c53SMatan Azrad 164203ce2c53SMatan Azrad /* 1643af75078fSIntel * Launch packet forwarding configuration. 
 */
/*
 * Start packet forwarding with the current configuration: validate the
 * rx/tx queue counts against the forwarding mode, require all ports to
 * be started, snapshot the port stats, reset the per-stream counters
 * and launch the forwarding loop on the forwarding lcores. When
 * with_tx_first > 0, that many single TXONLY bursts are sent first to
 * prime loopback test configurations.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	/* Queue-count sanity checks for the selected forwarding mode. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must have DCB configured and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot the HW stats so the next display shows deltas only. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset the software per-stream counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

/*
 * Stop packet forwarding: raise the "stopped" flag of every forwarding
 * lcore, wait for them to finish, fold the per-stream counters back
 * into the per-port counters and display per-stream, per-port and
 * accumulated statistics.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold stream counters into the ports; display per-stream stats
	 * only when several streams share a port. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
			fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the snapshot taken at start so that only the
		 * traffic of this forwarding run is reported. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

/* Bring the link of the given port up, reporting failure on stdout. */
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

/* Bring the link of the given port down, reporting failure on stdout. */
void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

/*
 * Return 1 when every known port is in the STARTED state
 * (bonding slaves are exempt), 0 otherwise.
 */
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

/* Return 1 when the port is stopped; bonding slaves count as stopped. */
int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

/* Return 1 when every known port is stopped, 0 otherwise. */
int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

/* Return 1 when the given port id is valid and in the STARTED state. */
int
port_is_started(portid_t
port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

/*
 * Start one port, or all ports when pid == RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues when flagged as
 * needing it, start the device, and register the ethdev event
 * callbacks. Port state transitions are guarded with atomic
 * compare-and-set on port_status. Returns 0 on success, -1 on a
 * configuration failure.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: STOPPED -> HANDLING, skip if not stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honour a per-port TX ring NUMA override
				 * when one was configured. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* Register the generic event callback for every event type. */
	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup even callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

/*
 * Stop one port, or all ports when pid == RTE_PORT_ALL. Ports that are
 * still part of an active forwarding configuration, or that act as
 * bonding slaves, are skipped with a message.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
Iremonger printf("Please remove port %d from bonded device.\n", pi); 21740e545d30SBernard Iremonger continue; 21750e545d30SBernard Iremonger } 21760e545d30SBernard Iremonger 2177ce8d5614SIntel port = &ports[pi]; 2178ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 2179ce8d5614SIntel RTE_PORT_HANDLING) == 0) 2180ce8d5614SIntel continue; 2181ce8d5614SIntel 2182ce8d5614SIntel rte_eth_dev_stop(pi); 2183ce8d5614SIntel 2184ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2185ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2186ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 2187ce8d5614SIntel need_check_link_status = 1; 2188ce8d5614SIntel } 2189bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 2190edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 2191ce8d5614SIntel 2192ce8d5614SIntel printf("Done\n"); 2193ce8d5614SIntel } 2194ce8d5614SIntel 2195ce6959bfSWisam Jaddo static void 2196ce6959bfSWisam Jaddo remove_unused_fwd_ports(void) 2197ce6959bfSWisam Jaddo { 2198ce6959bfSWisam Jaddo int i; 2199ce6959bfSWisam Jaddo int last_port_idx = nb_ports - 1; 2200ce6959bfSWisam Jaddo 2201ce6959bfSWisam Jaddo for (i = 0; i <= last_port_idx; i++) { /* iterate in ports_ids */ 2202ce6959bfSWisam Jaddo if (rte_eth_devices[ports_ids[i]].state != RTE_ETH_DEV_UNUSED) 2203ce6959bfSWisam Jaddo continue; 2204ce6959bfSWisam Jaddo /* skip unused ports at the end */ 2205ce6959bfSWisam Jaddo while (i <= last_port_idx && 2206ce6959bfSWisam Jaddo rte_eth_devices[ports_ids[last_port_idx]].state 2207ce6959bfSWisam Jaddo == RTE_ETH_DEV_UNUSED) 2208ce6959bfSWisam Jaddo last_port_idx--; 2209ce6959bfSWisam Jaddo if (last_port_idx < i) 2210ce6959bfSWisam Jaddo break; 2211ce6959bfSWisam Jaddo /* overwrite unused port with last valid port */ 2212ce6959bfSWisam Jaddo ports_ids[i] = ports_ids[last_port_idx]; 2213ce6959bfSWisam Jaddo /* decrease ports count */ 2214ce6959bfSWisam Jaddo last_port_idx--; 
2215ce6959bfSWisam Jaddo } 2216ce6959bfSWisam Jaddo nb_ports = rte_eth_dev_count_avail(); 2217ce6959bfSWisam Jaddo update_fwd_ports(RTE_MAX_ETHPORTS); 2218ce6959bfSWisam Jaddo } 2219ce6959bfSWisam Jaddo 2220ce8d5614SIntel void 2221ce8d5614SIntel close_port(portid_t pid) 2222ce8d5614SIntel { 2223ce8d5614SIntel portid_t pi; 2224ce8d5614SIntel struct rte_port *port; 2225ce8d5614SIntel 22264468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 22274468635fSMichael Qiu return; 22284468635fSMichael Qiu 2229ce8d5614SIntel printf("Closing ports...\n"); 2230ce8d5614SIntel 22317d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 22324468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2233ce8d5614SIntel continue; 2234ce8d5614SIntel 2235a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 2236a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 2237a8ef3e3aSBernard Iremonger continue; 2238a8ef3e3aSBernard Iremonger } 2239a8ef3e3aSBernard Iremonger 22400e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 22410e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 22420e545d30SBernard Iremonger continue; 22430e545d30SBernard Iremonger } 22440e545d30SBernard Iremonger 2245ce8d5614SIntel port = &ports[pi]; 2246ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2247d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 2248d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 2249d4e8ad64SMichael Qiu continue; 2250d4e8ad64SMichael Qiu } 2251d4e8ad64SMichael Qiu 2252d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 2253ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 2254ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 2255ce8d5614SIntel continue; 2256ce8d5614SIntel } 2257ce8d5614SIntel 2258938a184aSAdrien Mazarguil if (port->flow_list) 2259938a184aSAdrien Mazarguil port_flow_flush(pi); 
2260ce8d5614SIntel rte_eth_dev_close(pi); 2261ce8d5614SIntel 226223ea57a2SThomas Monjalon remove_unused_fwd_ports(); 226323ea57a2SThomas Monjalon 2264ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2265ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 2266b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 2267ce8d5614SIntel } 2268ce8d5614SIntel 2269ce8d5614SIntel printf("Done\n"); 2270ce8d5614SIntel } 2271ce8d5614SIntel 2272edab33b1STetsuya Mukawa void 227397f1e196SWei Dai reset_port(portid_t pid) 227497f1e196SWei Dai { 227597f1e196SWei Dai int diag; 227697f1e196SWei Dai portid_t pi; 227797f1e196SWei Dai struct rte_port *port; 227897f1e196SWei Dai 227997f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 228097f1e196SWei Dai return; 228197f1e196SWei Dai 228297f1e196SWei Dai printf("Resetting ports...\n"); 228397f1e196SWei Dai 228497f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 228597f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 228697f1e196SWei Dai continue; 228797f1e196SWei Dai 228897f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 228997f1e196SWei Dai printf("Please remove port %d from forwarding " 229097f1e196SWei Dai "configuration.\n", pi); 229197f1e196SWei Dai continue; 229297f1e196SWei Dai } 229397f1e196SWei Dai 229497f1e196SWei Dai if (port_is_bonding_slave(pi)) { 229597f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 229697f1e196SWei Dai pi); 229797f1e196SWei Dai continue; 229897f1e196SWei Dai } 229997f1e196SWei Dai 230097f1e196SWei Dai diag = rte_eth_dev_reset(pi); 230197f1e196SWei Dai if (diag == 0) { 230297f1e196SWei Dai port = &ports[pi]; 230397f1e196SWei Dai port->need_reconfig = 1; 230497f1e196SWei Dai port->need_reconfig_queues = 1; 230597f1e196SWei Dai } else { 230697f1e196SWei Dai printf("Failed to reset port %d. 
diag=%d\n", pi, diag); 230797f1e196SWei Dai } 230897f1e196SWei Dai } 230997f1e196SWei Dai 231097f1e196SWei Dai printf("Done\n"); 231197f1e196SWei Dai } 231297f1e196SWei Dai 231397f1e196SWei Dai void 2314edab33b1STetsuya Mukawa attach_port(char *identifier) 2315ce8d5614SIntel { 2316ebf5e9b7SBernard Iremonger portid_t pi = 0; 2317*c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 2318ce8d5614SIntel 2319edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 2320edab33b1STetsuya Mukawa 2321edab33b1STetsuya Mukawa if (identifier == NULL) { 2322edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 2323edab33b1STetsuya Mukawa return; 2324ce8d5614SIntel } 2325ce8d5614SIntel 2326*c9cce428SThomas Monjalon if (rte_dev_probe(identifier) != 0) { 2327*c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 2328edab33b1STetsuya Mukawa return; 2329*c9cce428SThomas Monjalon } 2330*c9cce428SThomas Monjalon 2331*c9cce428SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) 2332*c9cce428SThomas Monjalon setup_attached_port(pi); 2333*c9cce428SThomas Monjalon } 2334*c9cce428SThomas Monjalon 2335*c9cce428SThomas Monjalon static void 2336*c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 2337*c9cce428SThomas Monjalon { 2338*c9cce428SThomas Monjalon unsigned int socket_id; 2339edab33b1STetsuya Mukawa 2340931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 234129841336SPhil Yang /* if socket_id is invalid, set to the first available socket. 
*/ 2342931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 234329841336SPhil Yang socket_id = socket_ids[0]; 2344931126baSBernard Iremonger reconfig(pi, socket_id); 2345edab33b1STetsuya Mukawa rte_eth_promiscuous_enable(pi); 2346edab33b1STetsuya Mukawa 23474918a357SXiaoyun Li ports_ids[nb_ports] = pi; 2348d9a42a69SThomas Monjalon nb_ports = rte_eth_dev_count_avail(); 2349edab33b1STetsuya Mukawa 2350edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 2351edab33b1STetsuya Mukawa 235203ce2c53SMatan Azrad update_fwd_ports(pi); 235303ce2c53SMatan Azrad 2354edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2355edab33b1STetsuya Mukawa printf("Done\n"); 2356edab33b1STetsuya Mukawa } 2357edab33b1STetsuya Mukawa 2358edab33b1STetsuya Mukawa void 235928caa76aSZhiyong Yang detach_port(portid_t port_id) 23605f4ec54fSChen Jing D(Mark) { 2361*c9cce428SThomas Monjalon printf("Removing a device...\n"); 23625f4ec54fSChen Jing D(Mark) 236323ea57a2SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 23643f4a8370SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 23653f4a8370SThomas Monjalon printf("Port not stopped\n"); 2366edab33b1STetsuya Mukawa return; 2367edab33b1STetsuya Mukawa } 23683f4a8370SThomas Monjalon printf("Port was not closed\n"); 2369938a184aSAdrien Mazarguil if (ports[port_id].flow_list) 2370938a184aSAdrien Mazarguil port_flow_flush(port_id); 23713f4a8370SThomas Monjalon } 2372938a184aSAdrien Mazarguil 2373*c9cce428SThomas Monjalon if (rte_dev_remove(rte_eth_devices[port_id].device) != 0) { 2374adea04c4SZhiyong Yang TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id); 2375edab33b1STetsuya Mukawa return; 23763070419eSGaetan Rivet } 2377edab33b1STetsuya Mukawa 2378ce6959bfSWisam Jaddo remove_unused_fwd_ports(); 237903ce2c53SMatan Azrad 2380adea04c4SZhiyong Yang printf("Port %u is detached. 
Now total ports is %d\n", 2381adea04c4SZhiyong Yang port_id, nb_ports); 2382edab33b1STetsuya Mukawa printf("Done\n"); 2383edab33b1STetsuya Mukawa return; 23845f4ec54fSChen Jing D(Mark) } 23855f4ec54fSChen Jing D(Mark) 2386af75078fSIntel void 2387af75078fSIntel pmd_test_exit(void) 2388af75078fSIntel { 2389124909d7SZhiyong Yang struct rte_device *device; 2390af75078fSIntel portid_t pt_id; 2391fb73e096SJeff Guo int ret; 2392af75078fSIntel 23938210ec25SPablo de Lara if (test_done == 0) 23948210ec25SPablo de Lara stop_packet_forwarding(); 23958210ec25SPablo de Lara 2396d3a274ceSZhihong Wang if (ports != NULL) { 2397d3a274ceSZhihong Wang no_link_check = 1; 23987d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 2399d3a274ceSZhihong Wang printf("\nShutting down port %d...\n", pt_id); 2400af75078fSIntel fflush(stdout); 2401d3a274ceSZhihong Wang stop_port(pt_id); 2402d3a274ceSZhihong Wang close_port(pt_id); 2403124909d7SZhiyong Yang 2404124909d7SZhiyong Yang /* 2405124909d7SZhiyong Yang * This is a workaround to fix a virtio-user issue that 2406124909d7SZhiyong Yang * requires to call clean-up routine to remove existing 2407124909d7SZhiyong Yang * socket. 2408124909d7SZhiyong Yang * This workaround valid only for testpmd, needs a fix 2409124909d7SZhiyong Yang * valid for all applications. 
2410124909d7SZhiyong Yang * TODO: Implement proper resource cleanup 2411124909d7SZhiyong Yang */ 2412124909d7SZhiyong Yang device = rte_eth_devices[pt_id].device; 2413124909d7SZhiyong Yang if (device && !strcmp(device->driver->name, "net_virtio_user")) 2414124909d7SZhiyong Yang detach_port(pt_id); 2415af75078fSIntel } 2416d3a274ceSZhihong Wang } 2417fb73e096SJeff Guo 2418fb73e096SJeff Guo if (hot_plug) { 2419fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 24202049c511SJeff Guo if (ret) { 2421fb73e096SJeff Guo RTE_LOG(ERR, EAL, 2422fb73e096SJeff Guo "fail to stop device event monitor."); 24232049c511SJeff Guo return; 24242049c511SJeff Guo } 2425fb73e096SJeff Guo 24262049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 24272049c511SJeff Guo eth_dev_event_callback, NULL); 24282049c511SJeff Guo if (ret < 0) { 2429fb73e096SJeff Guo RTE_LOG(ERR, EAL, 24302049c511SJeff Guo "fail to unregister device event callback.\n"); 24312049c511SJeff Guo return; 24322049c511SJeff Guo } 24332049c511SJeff Guo 24342049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 24352049c511SJeff Guo if (ret) { 24362049c511SJeff Guo RTE_LOG(ERR, EAL, 24372049c511SJeff Guo "fail to disable hotplug handling.\n"); 24382049c511SJeff Guo return; 24392049c511SJeff Guo } 2440fb73e096SJeff Guo } 2441fb73e096SJeff Guo 2442d3a274ceSZhihong Wang printf("\nBye...\n"); 2443af75078fSIntel } 2444af75078fSIntel 2445af75078fSIntel typedef void (*cmd_func_t)(void); 2446af75078fSIntel struct pmd_test_command { 2447af75078fSIntel const char *cmd_name; 2448af75078fSIntel cmd_func_t cmd_func; 2449af75078fSIntel }; 2450af75078fSIntel 2451af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 2452af75078fSIntel 2453ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 2454af75078fSIntel static void 2455edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 2456af75078fSIntel { 2457ce8d5614SIntel #define 
CHECK_INTERVAL 100 /* 100ms */ 2458ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2459f8244c63SZhiyong Yang portid_t portid; 2460f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 2461ce8d5614SIntel struct rte_eth_link link; 2462ce8d5614SIntel 2463ce8d5614SIntel printf("Checking link statuses...\n"); 2464ce8d5614SIntel fflush(stdout); 2465ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 2466ce8d5614SIntel all_ports_up = 1; 24677d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 2468ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 2469ce8d5614SIntel continue; 2470ce8d5614SIntel memset(&link, 0, sizeof(link)); 2471ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 2472ce8d5614SIntel /* print link status if flag set */ 2473ce8d5614SIntel if (print_flag == 1) { 2474ce8d5614SIntel if (link.link_status) 2475f8244c63SZhiyong Yang printf( 2476f8244c63SZhiyong Yang "Port%d Link Up. speed %u Mbps- %s\n", 2477f8244c63SZhiyong Yang portid, link.link_speed, 2478ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
2479ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 2480ce8d5614SIntel else 2481f8244c63SZhiyong Yang printf("Port %d Link Down\n", portid); 2482ce8d5614SIntel continue; 2483ce8d5614SIntel } 2484ce8d5614SIntel /* clear all_ports_up flag if any link down */ 248509419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 2486ce8d5614SIntel all_ports_up = 0; 2487ce8d5614SIntel break; 2488ce8d5614SIntel } 2489ce8d5614SIntel } 2490ce8d5614SIntel /* after finally printing all link status, get out */ 2491ce8d5614SIntel if (print_flag == 1) 2492ce8d5614SIntel break; 2493ce8d5614SIntel 2494ce8d5614SIntel if (all_ports_up == 0) { 2495ce8d5614SIntel fflush(stdout); 2496ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 2497ce8d5614SIntel } 2498ce8d5614SIntel 2499ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 2500ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2501ce8d5614SIntel print_flag = 1; 2502ce8d5614SIntel } 25038ea656f8SGaetan Rivet 25048ea656f8SGaetan Rivet if (lsc_interrupt) 25058ea656f8SGaetan Rivet break; 2506ce8d5614SIntel } 2507af75078fSIntel } 2508af75078fSIntel 2509284c908cSGaetan Rivet static void 2510284c908cSGaetan Rivet rmv_event_callback(void *arg) 2511284c908cSGaetan Rivet { 25123b97888aSMatan Azrad int need_to_start = 0; 25130da2a62bSMatan Azrad int org_no_link_check = no_link_check; 251428caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 2515284c908cSGaetan Rivet 2516284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 2517284c908cSGaetan Rivet 25183b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 25193b97888aSMatan Azrad need_to_start = 1; 25203b97888aSMatan Azrad stop_packet_forwarding(); 25213b97888aSMatan Azrad } 25220da2a62bSMatan Azrad no_link_check = 1; 2523284c908cSGaetan Rivet stop_port(port_id); 25240da2a62bSMatan Azrad no_link_check = org_no_link_check; 2525284c908cSGaetan Rivet close_port(port_id); 25263b97888aSMatan Azrad detach_port(port_id); 
25273b97888aSMatan Azrad if (need_to_start) 25283b97888aSMatan Azrad start_packet_forwarding(0); 2529284c908cSGaetan Rivet } 2530284c908cSGaetan Rivet 253176ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 2532d6af1a13SBernard Iremonger static int 2533f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2534d6af1a13SBernard Iremonger void *ret_param) 253576ad4a2dSGaetan Rivet { 253676ad4a2dSGaetan Rivet static const char * const event_desc[] = { 253776ad4a2dSGaetan Rivet [RTE_ETH_EVENT_UNKNOWN] = "Unknown", 253876ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_LSC] = "LSC", 253976ad4a2dSGaetan Rivet [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", 254076ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", 254176ad4a2dSGaetan Rivet [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", 2542badb87c1SAnoob Joseph [RTE_ETH_EVENT_IPSEC] = "IPsec", 254376ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MACSEC] = "MACsec", 254476ad4a2dSGaetan Rivet [RTE_ETH_EVENT_INTR_RMV] = "device removal", 25454fb82244SMatan Azrad [RTE_ETH_EVENT_NEW] = "device probed", 25464fb82244SMatan Azrad [RTE_ETH_EVENT_DESTROY] = "device released", 254776ad4a2dSGaetan Rivet [RTE_ETH_EVENT_MAX] = NULL, 254876ad4a2dSGaetan Rivet }; 254976ad4a2dSGaetan Rivet 255076ad4a2dSGaetan Rivet RTE_SET_USED(param); 2551d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 255276ad4a2dSGaetan Rivet 255376ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 2554f431e010SHerakliusz Lipiec fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 255576ad4a2dSGaetan Rivet port_id, __func__, type); 255676ad4a2dSGaetan Rivet fflush(stderr); 25573af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 2558f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 255976ad4a2dSGaetan Rivet event_desc[type]); 256076ad4a2dSGaetan Rivet fflush(stdout); 256176ad4a2dSGaetan Rivet } 2562284c908cSGaetan Rivet 25630e45c64dSMatan 
Azrad if (port_id_is_invalid(port_id, DISABLED_WARN)) 25640e45c64dSMatan Azrad return 0; 25650e45c64dSMatan Azrad 2566284c908cSGaetan Rivet switch (type) { 2567284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 2568284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 2569284c908cSGaetan Rivet rmv_event_callback, (void *)(intptr_t)port_id)) 2570284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 2571284c908cSGaetan Rivet break; 2572284c908cSGaetan Rivet default: 2573284c908cSGaetan Rivet break; 2574284c908cSGaetan Rivet } 2575d6af1a13SBernard Iremonger return 0; 257676ad4a2dSGaetan Rivet } 257776ad4a2dSGaetan Rivet 2578fb73e096SJeff Guo /* This function is used by the interrupt thread */ 2579fb73e096SJeff Guo static void 258089ecd110SJeff Guo eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type, 2581fb73e096SJeff Guo __rte_unused void *arg) 2582fb73e096SJeff Guo { 25832049c511SJeff Guo uint16_t port_id; 25842049c511SJeff Guo int ret; 25852049c511SJeff Guo 2586fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 2587fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 2588fb73e096SJeff Guo __func__, type); 2589fb73e096SJeff Guo fflush(stderr); 2590fb73e096SJeff Guo } 2591fb73e096SJeff Guo 2592fb73e096SJeff Guo switch (type) { 2593fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 2594fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been removed!\n", 2595fb73e096SJeff Guo device_name); 25962049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 25972049c511SJeff Guo if (ret) { 25982049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 25992049c511SJeff Guo device_name); 26002049c511SJeff Guo return; 26012049c511SJeff Guo } 26022049c511SJeff Guo rmv_event_callback((void *)(intptr_t)port_id); 2603fb73e096SJeff Guo break; 2604fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 2605fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 2606fb73e096SJeff 
Guo device_name); 2607fb73e096SJeff Guo /* TODO: After finish kernel driver binding, 2608fb73e096SJeff Guo * begin to attach port. 2609fb73e096SJeff Guo */ 2610fb73e096SJeff Guo break; 2611fb73e096SJeff Guo default: 2612fb73e096SJeff Guo break; 2613fb73e096SJeff Guo } 2614fb73e096SJeff Guo } 2615fb73e096SJeff Guo 2616013af9b6SIntel static int 261728caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2618af75078fSIntel { 2619013af9b6SIntel uint16_t i; 2620af75078fSIntel int diag; 2621013af9b6SIntel uint8_t mapping_found = 0; 2622af75078fSIntel 2623013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 2624013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 2625013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 2626013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 2627013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 2628013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 2629013af9b6SIntel if (diag != 0) 2630013af9b6SIntel return diag; 2631013af9b6SIntel mapping_found = 1; 2632af75078fSIntel } 2633013af9b6SIntel } 2634013af9b6SIntel if (mapping_found) 2635013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 2636013af9b6SIntel return 0; 2637013af9b6SIntel } 2638013af9b6SIntel 2639013af9b6SIntel static int 264028caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 2641013af9b6SIntel { 2642013af9b6SIntel uint16_t i; 2643013af9b6SIntel int diag; 2644013af9b6SIntel uint8_t mapping_found = 0; 2645013af9b6SIntel 2646013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 2647013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 2648013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 2649013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2650013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 2651013af9b6SIntel 
rx_queue_stats_mappings[i].stats_counter_id); 2652013af9b6SIntel if (diag != 0) 2653013af9b6SIntel return diag; 2654013af9b6SIntel mapping_found = 1; 2655013af9b6SIntel } 2656013af9b6SIntel } 2657013af9b6SIntel if (mapping_found) 2658013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 2659013af9b6SIntel return 0; 2660013af9b6SIntel } 2661013af9b6SIntel 2662013af9b6SIntel static void 266328caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 2664013af9b6SIntel { 2665013af9b6SIntel int diag = 0; 2666013af9b6SIntel 2667013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 2668af75078fSIntel if (diag != 0) { 2669013af9b6SIntel if (diag == -ENOTSUP) { 2670013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 2671013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 2672013af9b6SIntel } 2673013af9b6SIntel else 2674013af9b6SIntel rte_exit(EXIT_FAILURE, 2675013af9b6SIntel "set_tx_queue_stats_mapping_registers " 2676013af9b6SIntel "failed for port id=%d diag=%d\n", 2677af75078fSIntel pi, diag); 2678af75078fSIntel } 2679013af9b6SIntel 2680013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 2681af75078fSIntel if (diag != 0) { 2682013af9b6SIntel if (diag == -ENOTSUP) { 2683013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 2684013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 2685013af9b6SIntel } 2686013af9b6SIntel else 2687013af9b6SIntel rte_exit(EXIT_FAILURE, 2688013af9b6SIntel "set_rx_queue_stats_mapping_registers " 2689013af9b6SIntel "failed for port id=%d diag=%d\n", 2690af75078fSIntel pi, diag); 2691af75078fSIntel } 2692af75078fSIntel } 2693af75078fSIntel 2694f2c5125aSPablo de Lara static void 2695f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 2696f2c5125aSPablo de Lara { 2697d44f8a48SQi Zhang uint16_t qid; 2698f2c5125aSPablo de Lara 2699d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 2700d44f8a48SQi 
Zhang port->rx_conf[qid] = port->dev_info.default_rxconf; 2701d44f8a48SQi Zhang 2702d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 2703f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 2704d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 2705f2c5125aSPablo de Lara 2706f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 2707d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 2708f2c5125aSPablo de Lara 2709f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 2710d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 2711f2c5125aSPablo de Lara 2712f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 2713d44f8a48SQi Zhang port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 2714f2c5125aSPablo de Lara 2715f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 2716d44f8a48SQi Zhang port->rx_conf[qid].rx_drop_en = rx_drop_en; 2717f2c5125aSPablo de Lara 2718d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 2719d44f8a48SQi Zhang } 2720d44f8a48SQi Zhang 2721d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 2722d44f8a48SQi Zhang port->tx_conf[qid] = port->dev_info.default_txconf; 2723d44f8a48SQi Zhang 2724d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 2725f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2726d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2727f2c5125aSPablo de Lara 2728f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2729d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2730f2c5125aSPablo de Lara 2731f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2732d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2733f2c5125aSPablo de Lara 2734f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2735d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2736f2c5125aSPablo de Lara 2737f2c5125aSPablo de Lara if 
(tx_free_thresh != RTE_PMD_PARAM_UNSET) 2738d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2739d44f8a48SQi Zhang 2740d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 2741d44f8a48SQi Zhang } 2742f2c5125aSPablo de Lara } 2743f2c5125aSPablo de Lara 2744013af9b6SIntel void 2745013af9b6SIntel init_port_config(void) 2746013af9b6SIntel { 2747013af9b6SIntel portid_t pid; 2748013af9b6SIntel struct rte_port *port; 2749013af9b6SIntel 27507d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2751013af9b6SIntel port = &ports[pid]; 2752013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 2753422515b9SAdrien Mazarguil rte_eth_dev_info_get(pid, &port->dev_info); 27543ce690d3SBruce Richardson if (nb_rxq > 1) { 2755013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 275690892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 2757422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 2758af75078fSIntel } else { 2759013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2760013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2761af75078fSIntel } 27623ce690d3SBruce Richardson 27635f592039SJingjing Wu if (port->dcb_flag == 0) { 27643ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 27653ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 27663ce690d3SBruce Richardson else 27673ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 27683ce690d3SBruce Richardson } 27693ce690d3SBruce Richardson 2770f2c5125aSPablo de Lara rxtx_port_config(port); 2771013af9b6SIntel 2772013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2773013af9b6SIntel 2774013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 277550c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2776e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 27777b7e5ba7SIntel #endif 27788ea656f8SGaetan Rivet 27798ea656f8SGaetan Rivet if 
(lsc_interrupt && 27808ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 27818ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 27828ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2783284c908cSGaetan Rivet if (rmv_interrupt && 2784284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2785284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2786284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 2787013af9b6SIntel } 2788013af9b6SIntel } 2789013af9b6SIntel 279041b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 279141b05095SBernard Iremonger { 279241b05095SBernard Iremonger struct rte_port *port; 279341b05095SBernard Iremonger 279441b05095SBernard Iremonger port = &ports[slave_pid]; 279541b05095SBernard Iremonger port->slave_flag = 1; 279641b05095SBernard Iremonger } 279741b05095SBernard Iremonger 279841b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 279941b05095SBernard Iremonger { 280041b05095SBernard Iremonger struct rte_port *port; 280141b05095SBernard Iremonger 280241b05095SBernard Iremonger port = &ports[slave_pid]; 280341b05095SBernard Iremonger port->slave_flag = 0; 280441b05095SBernard Iremonger } 280541b05095SBernard Iremonger 28060e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 28070e545d30SBernard Iremonger { 28080e545d30SBernard Iremonger struct rte_port *port; 28090e545d30SBernard Iremonger 28100e545d30SBernard Iremonger port = &ports[slave_pid]; 2811b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 2812b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2813b8b8b344SMatan Azrad return 1; 2814b8b8b344SMatan Azrad return 0; 28150e545d30SBernard Iremonger } 28160e545d30SBernard Iremonger 2817013af9b6SIntel const uint16_t vlan_tags[] = { 2818013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2819013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2820013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2821013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 
31 2822013af9b6SIntel }; 2823013af9b6SIntel 2824013af9b6SIntel static int 2825ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 28261a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 28271a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 28281a572499SJingjing Wu uint8_t pfc_en) 2829013af9b6SIntel { 2830013af9b6SIntel uint8_t i; 2831ac7c491cSKonstantin Ananyev int32_t rc; 2832ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 2833af75078fSIntel 2834af75078fSIntel /* 2835013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2836013af9b6SIntel * given above, and the number of traffic classes available for use. 2837af75078fSIntel */ 28381a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 28391a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 28401a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 28411a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 28421a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2843013af9b6SIntel 2844547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 28451a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 28461a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 28471a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 28481a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 28491a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 28501a572499SJingjing Wu (num_tcs == ETH_4_TCS ? 
ETH_32_POOLS : ETH_16_POOLS); 2851013af9b6SIntel 28521a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 28531a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 28541a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 28551a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 28561a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2857af75078fSIntel } 2858013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2859f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 2860f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 2861013af9b6SIntel } 2862013af9b6SIntel 2863013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 286432e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 286532e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 28661a572499SJingjing Wu } else { 28671a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 28681a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 28691a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 28701a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 2871013af9b6SIntel 2872ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 2873ac7c491cSKonstantin Ananyev if (rc != 0) 2874ac7c491cSKonstantin Ananyev return rc; 2875ac7c491cSKonstantin Ananyev 28761a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 28771a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 28781a572499SJingjing Wu 2879bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 2880bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 2881bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 2882013af9b6SIntel } 2883ac7c491cSKonstantin Ananyev 28841a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 2885ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 288632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 28871a572499SJingjing Wu } 28881a572499SJingjing Wu 
28891a572499SJingjing Wu if (pfc_en) 28901a572499SJingjing Wu eth_conf->dcb_capability_en = 28911a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 2892013af9b6SIntel else 2893013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 2894013af9b6SIntel 2895013af9b6SIntel return 0; 2896013af9b6SIntel } 2897013af9b6SIntel 2898013af9b6SIntel int 28991a572499SJingjing Wu init_port_dcb_config(portid_t pid, 29001a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 29011a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 29021a572499SJingjing Wu uint8_t pfc_en) 2903013af9b6SIntel { 2904013af9b6SIntel struct rte_eth_conf port_conf; 2905013af9b6SIntel struct rte_port *rte_port; 2906013af9b6SIntel int retval; 2907013af9b6SIntel uint16_t i; 2908013af9b6SIntel 29092a977b89SWenzhuo Lu rte_port = &ports[pid]; 2910013af9b6SIntel 2911013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 2912013af9b6SIntel /* Enter DCB configuration status */ 2913013af9b6SIntel dcb_config = 1; 2914013af9b6SIntel 2915d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 2916d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 2917d5354e89SYanglong Wu 2918013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 2919ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 2920013af9b6SIntel if (retval < 0) 2921013af9b6SIntel return retval; 29220074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 2923013af9b6SIntel 29242f203d44SQi Zhang /* re-configure the device . */ 29252f203d44SQi Zhang rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 29262a977b89SWenzhuo Lu 29272a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 29282a977b89SWenzhuo Lu 29292a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 29302a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 
29312a977b89SWenzhuo Lu */ 29322a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 29332a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 29342a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 29352a977b89SWenzhuo Lu " for port %d.", pid); 29362a977b89SWenzhuo Lu return -1; 29372a977b89SWenzhuo Lu } 29382a977b89SWenzhuo Lu 29392a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 29402a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 29412a977b89SWenzhuo Lu */ 29422a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 294386ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 294486ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 294586ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 294686ef65eeSBernard Iremonger } else { 29472a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 29482a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 294986ef65eeSBernard Iremonger } 29502a977b89SWenzhuo Lu } else { 29512a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 29522a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 29532a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 29542a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 29552a977b89SWenzhuo Lu } else { 29562a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 29572a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 29582a977b89SWenzhuo Lu 29592a977b89SWenzhuo Lu } 29602a977b89SWenzhuo Lu } 29612a977b89SWenzhuo Lu rx_free_thresh = 64; 29622a977b89SWenzhuo Lu 2963013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 2964013af9b6SIntel 2965f2c5125aSPablo de Lara rxtx_port_config(rte_port); 2966013af9b6SIntel /* VLAN filter */ 29670074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 29681a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 2969013af9b6SIntel 
rx_vft_set(pid, vlan_tags[i], 1); 2970013af9b6SIntel 2971013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 2972013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 2973013af9b6SIntel 29747741e4cfSIntel rte_port->dcb_flag = 1; 29757741e4cfSIntel 2976013af9b6SIntel return 0; 2977af75078fSIntel } 2978af75078fSIntel 2979ffc468ffSTetsuya Mukawa static void 2980ffc468ffSTetsuya Mukawa init_port(void) 2981ffc468ffSTetsuya Mukawa { 2982ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. */ 2983ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 2984ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 2985ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 2986ffc468ffSTetsuya Mukawa if (ports == NULL) { 2987ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 2988ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 2989ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 2990ffc468ffSTetsuya Mukawa } 299129841336SPhil Yang 299229841336SPhil Yang /* Initialize ports NUMA structures */ 299329841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 299429841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 299529841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 2996ffc468ffSTetsuya Mukawa } 2997ffc468ffSTetsuya Mukawa 2998d3a274ceSZhihong Wang static void 2999d3a274ceSZhihong Wang force_quit(void) 3000d3a274ceSZhihong Wang { 3001d3a274ceSZhihong Wang pmd_test_exit(); 3002d3a274ceSZhihong Wang prompt_exit(); 3003d3a274ceSZhihong Wang } 3004d3a274ceSZhihong Wang 3005d3a274ceSZhihong Wang static void 3006cfea1f30SPablo de Lara print_stats(void) 3007cfea1f30SPablo de Lara { 3008cfea1f30SPablo de Lara uint8_t i; 3009cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 3010cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 3011cfea1f30SPablo de Lara 3012cfea1f30SPablo de Lara /* Clear screen and move to top left */ 
3013cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 3014cfea1f30SPablo de Lara 3015cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 3016cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 3017cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 3018cfea1f30SPablo de Lara } 3019cfea1f30SPablo de Lara 3020cfea1f30SPablo de Lara static void 3021d3a274ceSZhihong Wang signal_handler(int signum) 3022d3a274ceSZhihong Wang { 3023d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 3024d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 3025d3a274ceSZhihong Wang signum); 3026102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 3027102b7329SReshma Pattan /* uninitialize packet capture framework */ 3028102b7329SReshma Pattan rte_pdump_uninit(); 3029102b7329SReshma Pattan #endif 303062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 303162d3216dSReshma Pattan rte_latencystats_uninit(); 303262d3216dSReshma Pattan #endif 3033d3a274ceSZhihong Wang force_quit(); 3034d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
*/ 3035d9a191a0SPhil Yang f_quit = 1; 3036d3a274ceSZhihong Wang /* exit with the expected status */ 3037d3a274ceSZhihong Wang signal(signum, SIG_DFL); 3038d3a274ceSZhihong Wang kill(getpid(), signum); 3039d3a274ceSZhihong Wang } 3040d3a274ceSZhihong Wang } 3041d3a274ceSZhihong Wang 3042af75078fSIntel int 3043af75078fSIntel main(int argc, char** argv) 3044af75078fSIntel { 3045af75078fSIntel int diag; 3046f8244c63SZhiyong Yang portid_t port_id; 30474918a357SXiaoyun Li uint16_t count; 3048fb73e096SJeff Guo int ret; 3049af75078fSIntel 3050d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 3051d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 3052d3a274ceSZhihong Wang 3053af75078fSIntel diag = rte_eal_init(argc, argv); 3054af75078fSIntel if (diag < 0) 3055af75078fSIntel rte_panic("Cannot init EAL\n"); 3056af75078fSIntel 3057285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 3058285fd101SOlivier Matz if (testpmd_logtype < 0) 3059285fd101SOlivier Matz rte_panic("Cannot register log type"); 3060285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 3061285fd101SOlivier Matz 30624aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP 30634aa0d012SAnatoly Burakov /* initialize packet capture framework */ 30644aa0d012SAnatoly Burakov rte_pdump_init(NULL); 30654aa0d012SAnatoly Burakov #endif 30664aa0d012SAnatoly Burakov 30674918a357SXiaoyun Li count = 0; 30684918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 30694918a357SXiaoyun Li ports_ids[count] = port_id; 30704918a357SXiaoyun Li count++; 30714918a357SXiaoyun Li } 30724918a357SXiaoyun Li nb_ports = (portid_t) count; 30734aa0d012SAnatoly Burakov if (nb_ports == 0) 30744aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 30754aa0d012SAnatoly Burakov 30764aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 30774aa0d012SAnatoly Burakov init_port(); 30784aa0d012SAnatoly Burakov 30794aa0d012SAnatoly Burakov set_def_fwd_config(); 30804aa0d012SAnatoly 
Burakov if (nb_lcores == 0) 30814aa0d012SAnatoly Burakov rte_panic("Empty set of forwarding logical cores - check the " 30824aa0d012SAnatoly Burakov "core mask supplied in the command parameters\n"); 30834aa0d012SAnatoly Burakov 3084e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 3085e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE 3086e505d84cSAnatoly Burakov bitrate_enabled = 0; 3087e505d84cSAnatoly Burakov #endif 3088e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS 3089e505d84cSAnatoly Burakov latencystats_enabled = 0; 3090e505d84cSAnatoly Burakov #endif 3091e505d84cSAnatoly Burakov 3092fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 3093fb7b8b32SAnatoly Burakov #ifdef RTE_EXEC_ENV_BSDAPP 3094fb7b8b32SAnatoly Burakov do_mlockall = 0; 3095fb7b8b32SAnatoly Burakov #else 3096fb7b8b32SAnatoly Burakov do_mlockall = 1; 3097fb7b8b32SAnatoly Burakov #endif 3098fb7b8b32SAnatoly Burakov 3099e505d84cSAnatoly Burakov argc -= diag; 3100e505d84cSAnatoly Burakov argv += diag; 3101e505d84cSAnatoly Burakov if (argc > 1) 3102e505d84cSAnatoly Burakov launch_args_parse(argc, argv); 3103e505d84cSAnatoly Burakov 3104e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 3105285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 31061c036b16SEelco Chaudron strerror(errno)); 31071c036b16SEelco Chaudron } 31081c036b16SEelco Chaudron 310999cabef0SPablo de Lara if (tx_first && interactive) 311099cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 311199cabef0SPablo de Lara "interactive mode.\n"); 31128820cba4SDavid Hunt 31138820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 31148820cba4SDavid Hunt printf("Warning: lsc_interrupt needs to be off when " 31158820cba4SDavid Hunt " using tx_first. 
Disabling.\n"); 31168820cba4SDavid Hunt lsc_interrupt = 0; 31178820cba4SDavid Hunt } 31188820cba4SDavid Hunt 31195a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 31205a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 31215a8fb55cSReshma Pattan 31225a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 3123af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 3124af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 3125af75078fSIntel nb_rxq, nb_txq); 3126af75078fSIntel 3127af75078fSIntel init_config(); 3128fb73e096SJeff Guo 3129fb73e096SJeff Guo if (hot_plug) { 31302049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 3131fb73e096SJeff Guo if (ret) { 31322049c511SJeff Guo RTE_LOG(ERR, EAL, 31332049c511SJeff Guo "fail to enable hotplug handling."); 3134fb73e096SJeff Guo return -1; 3135fb73e096SJeff Guo } 3136fb73e096SJeff Guo 31372049c511SJeff Guo ret = rte_dev_event_monitor_start(); 31382049c511SJeff Guo if (ret) { 31392049c511SJeff Guo RTE_LOG(ERR, EAL, 31402049c511SJeff Guo "fail to start device event monitoring."); 31412049c511SJeff Guo return -1; 31422049c511SJeff Guo } 31432049c511SJeff Guo 31442049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 31452049c511SJeff Guo eth_dev_event_callback, NULL); 31462049c511SJeff Guo if (ret) { 31472049c511SJeff Guo RTE_LOG(ERR, EAL, 31482049c511SJeff Guo "fail to register device event callback\n"); 31492049c511SJeff Guo return -1; 31502049c511SJeff Guo } 3151fb73e096SJeff Guo } 3152fb73e096SJeff Guo 3153148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 3154148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 3155af75078fSIntel 3156ce8d5614SIntel /* set all ports to promiscuous mode by default */ 31577d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(port_id) 3158ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 3159af75078fSIntel 31607e4441c8SRemy Horton /* Init metrics library */ 31617e4441c8SRemy Horton 
rte_metrics_init(rte_socket_id()); 31627e4441c8SRemy Horton 316362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 316462d3216dSReshma Pattan if (latencystats_enabled != 0) { 316562d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 316662d3216dSReshma Pattan if (ret) 316762d3216dSReshma Pattan printf("Warning: latencystats init()" 316862d3216dSReshma Pattan " returned error %d\n", ret); 316962d3216dSReshma Pattan printf("Latencystats running on lcore %d\n", 317062d3216dSReshma Pattan latencystats_lcore_id); 317162d3216dSReshma Pattan } 317262d3216dSReshma Pattan #endif 317362d3216dSReshma Pattan 31747e4441c8SRemy Horton /* Setup bitrate stats */ 31757e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 3176e25e6c70SRemy Horton if (bitrate_enabled != 0) { 31777e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 31787e4441c8SRemy Horton if (bitrate_data == NULL) 3179e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 3180e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 31817e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 3182e25e6c70SRemy Horton } 31837e4441c8SRemy Horton #endif 31847e4441c8SRemy Horton 31850d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 318681ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 318781ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 318881ef862bSAllain Legacy 3189ca7feb22SCyril Chemparathy if (interactive == 1) { 3190ca7feb22SCyril Chemparathy if (auto_start) { 3191ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 3192ca7feb22SCyril Chemparathy start_packet_forwarding(0); 3193ca7feb22SCyril Chemparathy } 3194af75078fSIntel prompt(); 31950de738cfSJiayu Hu pmd_test_exit(); 3196ca7feb22SCyril Chemparathy } else 31970d56cb81SThomas Monjalon #endif 31980d56cb81SThomas Monjalon { 3199af75078fSIntel char c; 3200af75078fSIntel int rc; 3201af75078fSIntel 3202d9a191a0SPhil Yang f_quit = 0; 3203d9a191a0SPhil Yang 3204af75078fSIntel printf("No commandline core given, 
start packet forwarding\n"); 320599cabef0SPablo de Lara start_packet_forwarding(tx_first); 3206cfea1f30SPablo de Lara if (stats_period != 0) { 3207cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 3208cfea1f30SPablo de Lara uint64_t timer_period; 3209cfea1f30SPablo de Lara 3210cfea1f30SPablo de Lara /* Convert to number of cycles */ 3211cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 3212cfea1f30SPablo de Lara 3213d9a191a0SPhil Yang while (f_quit == 0) { 3214cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 3215cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 3216cfea1f30SPablo de Lara 3217cfea1f30SPablo de Lara if (diff_time >= timer_period) { 3218cfea1f30SPablo de Lara print_stats(); 3219cfea1f30SPablo de Lara /* Reset the timer */ 3220cfea1f30SPablo de Lara diff_time = 0; 3221cfea1f30SPablo de Lara } 3222cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 3223cfea1f30SPablo de Lara prev_time = cur_time; 3224cfea1f30SPablo de Lara sleep(1); 3225cfea1f30SPablo de Lara } 3226cfea1f30SPablo de Lara } 3227cfea1f30SPablo de Lara 3228af75078fSIntel printf("Press enter to exit\n"); 3229af75078fSIntel rc = read(0, &c, 1); 3230d3a274ceSZhihong Wang pmd_test_exit(); 3231af75078fSIntel if (rc < 0) 3232af75078fSIntel return 1; 3233af75078fSIntel } 3234af75078fSIntel 3235af75078fSIntel return 0; 3236af75078fSIntel } 3237