1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2174a1631SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3af75078fSIntel */ 4af75078fSIntel 5af75078fSIntel #include <stdarg.h> 6af75078fSIntel #include <stdio.h> 7af75078fSIntel #include <stdlib.h> 8af75078fSIntel #include <signal.h> 9af75078fSIntel #include <string.h> 10af75078fSIntel #include <time.h> 11af75078fSIntel #include <fcntl.h> 121c036b16SEelco Chaudron #include <sys/mman.h> 13af75078fSIntel #include <sys/types.h> 14af75078fSIntel #include <errno.h> 15fb73e096SJeff Guo #include <stdbool.h> 16af75078fSIntel 17af75078fSIntel #include <sys/queue.h> 18af75078fSIntel #include <sys/stat.h> 19af75078fSIntel 20af75078fSIntel #include <stdint.h> 21af75078fSIntel #include <unistd.h> 22af75078fSIntel #include <inttypes.h> 23af75078fSIntel 24af75078fSIntel #include <rte_common.h> 25d1eb542eSOlivier Matz #include <rte_errno.h> 26af75078fSIntel #include <rte_byteorder.h> 27af75078fSIntel #include <rte_log.h> 28af75078fSIntel #include <rte_debug.h> 29af75078fSIntel #include <rte_cycles.h> 30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h> 31af75078fSIntel #include <rte_memory.h> 32af75078fSIntel #include <rte_memcpy.h> 33af75078fSIntel #include <rte_launch.h> 34af75078fSIntel #include <rte_eal.h> 35284c908cSGaetan Rivet #include <rte_alarm.h> 36af75078fSIntel #include <rte_per_lcore.h> 37af75078fSIntel #include <rte_lcore.h> 38af75078fSIntel #include <rte_atomic.h> 39af75078fSIntel #include <rte_branch_prediction.h> 40af75078fSIntel #include <rte_mempool.h> 41af75078fSIntel #include <rte_malloc.h> 42af75078fSIntel #include <rte_mbuf.h> 430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h> 44af75078fSIntel #include <rte_interrupts.h> 45af75078fSIntel #include <rte_pci.h> 46af75078fSIntel #include <rte_ether.h> 47af75078fSIntel #include <rte_ethdev.h> 48edab33b1STetsuya Mukawa #include <rte_dev.h> 49af75078fSIntel #include <rte_string_fns.h> 50e261265eSRadu Nicolau #ifdef 
RTE_LIBRTE_IXGBE_PMD 51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h> 52e261265eSRadu Nicolau #endif 53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 54102b7329SReshma Pattan #include <rte_pdump.h> 55102b7329SReshma Pattan #endif 56938a184aSAdrien Mazarguil #include <rte_flow.h> 577e4441c8SRemy Horton #include <rte_metrics.h> 587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 597e4441c8SRemy Horton #include <rte_bitrate.h> 607e4441c8SRemy Horton #endif 6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 6262d3216dSReshma Pattan #include <rte_latencystats.h> 6362d3216dSReshma Pattan #endif 64af75078fSIntel 65af75078fSIntel #include "testpmd.h" 66af75078fSIntel 67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB 68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ 69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000) 70c7f5dba7SAnatoly Burakov #else 71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB 72c7f5dba7SAnatoly Burakov #endif 73c7f5dba7SAnatoly Burakov 74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT 75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */ 76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26) 77c7f5dba7SAnatoly Burakov #else 78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT 79c7f5dba7SAnatoly Burakov #endif 80c7f5dba7SAnatoly Burakov 81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem" 82c7f5dba7SAnatoly Burakov 83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */ 85af75078fSIntel 86af75078fSIntel /* use master core for command line ? */ 87af75078fSIntel uint8_t interactive = 0; 88ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 8999cabef0SPablo de Lara uint8_t tx_first; 9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0}; 91af75078fSIntel 92af75078fSIntel /* 93af75078fSIntel * NUMA support configuration. 
94af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 95af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 96af75078fSIntel * probed ports among the CPU sockets 0 and 1. 97af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 98af75078fSIntel */ 99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */ 100af75078fSIntel 101af75078fSIntel /* 102b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 103b6ea6408SIntel * not configured. 104b6ea6408SIntel */ 105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 106b6ea6408SIntel 107b6ea6408SIntel /* 108c7f5dba7SAnatoly Burakov * Select mempool allocation type: 109c7f5dba7SAnatoly Burakov * - native: use regular DPDK memory 110c7f5dba7SAnatoly Burakov * - anon: use regular DPDK memory to create mempool, but populate using 111c7f5dba7SAnatoly Burakov * anonymous memory (may not be IOVA-contiguous) 112c7f5dba7SAnatoly Burakov * - xmem: use externally allocated hugepage memory 113148f963fSBruce Richardson */ 114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE; 115148f963fSBruce Richardson 116148f963fSBruce Richardson /* 11763531389SGeorgios Katsikas * Store specified sockets on which memory pool to be used by ports 11863531389SGeorgios Katsikas * is allocated. 11963531389SGeorgios Katsikas */ 12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS]; 12163531389SGeorgios Katsikas 12263531389SGeorgios Katsikas /* 12363531389SGeorgios Katsikas * Store specified sockets on which RX ring to be used by ports 12463531389SGeorgios Katsikas * is allocated. 12563531389SGeorgios Katsikas */ 12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 12763531389SGeorgios Katsikas 12863531389SGeorgios Katsikas /* 12963531389SGeorgios Katsikas * Store specified sockets on which TX ring to be used by ports 13063531389SGeorgios Katsikas * is allocated. 
13163531389SGeorgios Katsikas */ 13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS]; 13363531389SGeorgios Katsikas 13463531389SGeorgios Katsikas /* 135af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 136af75078fSIntel * forwarded. 137547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator 138af75078fSIntel * ports. 139af75078fSIntel */ 1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 141af75078fSIntel portid_t nb_peer_eth_addrs = 0; 142af75078fSIntel 143af75078fSIntel /* 144af75078fSIntel * Probed Target Environment. 145af75078fSIntel */ 146af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 147af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 149af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 150af75078fSIntel 1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ 1524918a357SXiaoyun Li 153af75078fSIntel /* 154af75078fSIntel * Test Forwarding Configuration. 155af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 156af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 157af75078fSIntel */ 158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 160af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 161af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 162af75078fSIntel 163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 165af75078fSIntel 166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. 
*/ 167af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 168af75078fSIntel 169af75078fSIntel /* 170af75078fSIntel * Forwarding engines. 171af75078fSIntel */ 172af75078fSIntel struct fwd_engine * fwd_engines[] = { 173af75078fSIntel &io_fwd_engine, 174af75078fSIntel &mac_fwd_engine, 175d47388f1SCyril Chemparathy &mac_swap_engine, 176e9e23a61SCyril Chemparathy &flow_gen_engine, 177af75078fSIntel &rx_only_engine, 178af75078fSIntel &tx_only_engine, 179af75078fSIntel &csum_fwd_engine, 180168dfa61SIvan Boule &icmp_echo_engine, 1813c156061SJens Freimann &noisy_vnf_engine, 1820ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC 1830ad778b3SJasvinder Singh &softnic_fwd_engine, 1845b590fbeSJasvinder Singh #endif 185af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 186af75078fSIntel &ieee1588_fwd_engine, 187af75078fSIntel #endif 188af75078fSIntel NULL, 189af75078fSIntel }; 190af75078fSIntel 191401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES]; 19259fcf854SShahaf Shuler uint16_t mempool_flags; 193401b744dSShahaf Shuler 194af75078fSIntel struct fwd_config cur_fwd_config; 195af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 196bf56fce1SZhihong Wang uint32_t retry_enabled; 197bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 198bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 199af75078fSIntel 200af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 201c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 202c8798818SIntel * specified on command-line. */ 203cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 204d9a191a0SPhil Yang 205d9a191a0SPhil Yang /* 206d9a191a0SPhil Yang * In container, it cannot terminate the process which running with 'stats-period' 207d9a191a0SPhil Yang * option. 
Set flag to exit stats period loop after received SIGINT/SIGTERM. 208d9a191a0SPhil Yang */ 209d9a191a0SPhil Yang uint8_t f_quit; 210d9a191a0SPhil Yang 211af75078fSIntel /* 212af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 213af75078fSIntel */ 214af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 215af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 216af75078fSIntel TXONLY_DEF_PACKET_LEN, 217af75078fSIntel }; 218af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 219af75078fSIntel 22079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 22179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 22279bec05bSKonstantin Ananyev 22382010ef5SYongseok Koh uint8_t txonly_multi_flow; 22482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */ 22582010ef5SYongseok Koh 226af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 227e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 228af75078fSIntel 229900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 230900550deSIntel uint8_t dcb_config = 0; 231900550deSIntel 232900550deSIntel /* Whether the dcb is in testing status */ 233900550deSIntel uint8_t dcb_test = 0; 234900550deSIntel 235af75078fSIntel /* 236af75078fSIntel * Configurable number of RX/TX queues. 237af75078fSIntel */ 238af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 239af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 240af75078fSIntel 241af75078fSIntel /* 242af75078fSIntel * Configurable number of RX/TX ring descriptors. 2438599ed31SRemy Horton * Defaults are supplied by drivers via ethdev. 
244af75078fSIntel */ 2458599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0 2468599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0 247af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 248af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 249af75078fSIntel 250f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1 251af75078fSIntel /* 252af75078fSIntel * Configurable values of RX and TX ring threshold registers. 253af75078fSIntel */ 254af75078fSIntel 255f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 256f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 257f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 258af75078fSIntel 259f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 260f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 261f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 262af75078fSIntel 263af75078fSIntel /* 264af75078fSIntel * Configurable value of RX free threshold. 265af75078fSIntel */ 266f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 267af75078fSIntel 268af75078fSIntel /* 269ce8d5614SIntel * Configurable value of RX drop enable. 270ce8d5614SIntel */ 271f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 272ce8d5614SIntel 273ce8d5614SIntel /* 274af75078fSIntel * Configurable value of TX free threshold. 275af75078fSIntel */ 276f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 277af75078fSIntel 278af75078fSIntel /* 279af75078fSIntel * Configurable value of TX RS bit threshold. 280af75078fSIntel */ 281f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 282af75078fSIntel 283af75078fSIntel /* 2843c156061SJens Freimann * Configurable value of buffered packets before sending. 
2853c156061SJens Freimann */ 2863c156061SJens Freimann uint16_t noisy_tx_sw_bufsz; 2873c156061SJens Freimann 2883c156061SJens Freimann /* 2893c156061SJens Freimann * Configurable value of packet buffer timeout. 2903c156061SJens Freimann */ 2913c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time; 2923c156061SJens Freimann 2933c156061SJens Freimann /* 2943c156061SJens Freimann * Configurable value for size of VNF internal memory area 2953c156061SJens Freimann * used for simulating noisy neighbour behaviour 2963c156061SJens Freimann */ 2973c156061SJens Freimann uint64_t noisy_lkup_mem_sz; 2983c156061SJens Freimann 2993c156061SJens Freimann /* 3003c156061SJens Freimann * Configurable value of number of random writes done in 3013c156061SJens Freimann * VNF simulation memory area. 3023c156061SJens Freimann */ 3033c156061SJens Freimann uint64_t noisy_lkup_num_writes; 3043c156061SJens Freimann 3053c156061SJens Freimann /* 3063c156061SJens Freimann * Configurable value of number of random reads done in 3073c156061SJens Freimann * VNF simulation memory area. 3083c156061SJens Freimann */ 3093c156061SJens Freimann uint64_t noisy_lkup_num_reads; 3103c156061SJens Freimann 3113c156061SJens Freimann /* 3123c156061SJens Freimann * Configurable value of number of random reads/writes done in 3133c156061SJens Freimann * VNF simulation memory area. 3143c156061SJens Freimann */ 3153c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes; 3163c156061SJens Freimann 3173c156061SJens Freimann /* 318af75078fSIntel * Receive Side Scaling (RSS) configuration. 319af75078fSIntel */ 3208a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 321af75078fSIntel 322af75078fSIntel /* 323af75078fSIntel * Port topology configuration 324af75078fSIntel */ 325af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 326af75078fSIntel 3277741e4cfSIntel /* 3287741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 
3297741e4cfSIntel */ 3307741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 3317741e4cfSIntel 332af75078fSIntel /* 3337ee3e944SVasily Philipov * Flow API isolated mode. 3347ee3e944SVasily Philipov */ 3357ee3e944SVasily Philipov uint8_t flow_isolate_all; 3367ee3e944SVasily Philipov 3377ee3e944SVasily Philipov /* 338bc202406SDavid Marchand * Avoids to check link status when starting/stopping a port. 339bc202406SDavid Marchand */ 340bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 341bc202406SDavid Marchand 342bc202406SDavid Marchand /* 3438ea656f8SGaetan Rivet * Enable link status change notification 3448ea656f8SGaetan Rivet */ 3458ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */ 3468ea656f8SGaetan Rivet 3478ea656f8SGaetan Rivet /* 348284c908cSGaetan Rivet * Enable device removal notification. 349284c908cSGaetan Rivet */ 350284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */ 351284c908cSGaetan Rivet 352fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. 
*/ 353fb73e096SJeff Guo 3544f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */ 3554f1ed78eSThomas Monjalon bool setup_on_probe_event = true; 3564f1ed78eSThomas Monjalon 35797b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */ 35897b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = { 35997b5d8b5SThomas Monjalon [RTE_ETH_EVENT_UNKNOWN] = "unknown", 36097b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_LSC] = "link state change", 36197b5d8b5SThomas Monjalon [RTE_ETH_EVENT_QUEUE_STATE] = "queue state", 36297b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RESET] = "reset", 36397b5d8b5SThomas Monjalon [RTE_ETH_EVENT_VF_MBOX] = "VF mbox", 36497b5d8b5SThomas Monjalon [RTE_ETH_EVENT_IPSEC] = "IPsec", 36597b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MACSEC] = "MACsec", 36697b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RMV] = "device removal", 36797b5d8b5SThomas Monjalon [RTE_ETH_EVENT_NEW] = "device probed", 36897b5d8b5SThomas Monjalon [RTE_ETH_EVENT_DESTROY] = "device released", 36997b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MAX] = NULL, 37097b5d8b5SThomas Monjalon }; 37197b5d8b5SThomas Monjalon 372284c908cSGaetan Rivet /* 3733af72783SGaetan Rivet * Display or mask ether events 3743af72783SGaetan Rivet * Default to all events except VF_MBOX 3753af72783SGaetan Rivet */ 3763af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 3773af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 3783af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 3793af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 380badb87c1SAnoob Joseph (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | 3813af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 3823af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 383e505d84cSAnatoly Burakov /* 384e505d84cSAnatoly Burakov * Decide if all memory are locked for performance. 
385e505d84cSAnatoly Burakov */ 386e505d84cSAnatoly Burakov int do_mlockall = 0; 3873af72783SGaetan Rivet 3883af72783SGaetan Rivet /* 3897b7e5ba7SIntel * NIC bypass mode configuration options. 3907b7e5ba7SIntel */ 3917b7e5ba7SIntel 39250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 3937b7e5ba7SIntel /* The NIC bypass watchdog timeout. */ 394e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 3957b7e5ba7SIntel #endif 3967b7e5ba7SIntel 397e261265eSRadu Nicolau 39862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 39962d3216dSReshma Pattan 40062d3216dSReshma Pattan /* 40162d3216dSReshma Pattan * Set when latency stats is enabled in the commandline 40262d3216dSReshma Pattan */ 40362d3216dSReshma Pattan uint8_t latencystats_enabled; 40462d3216dSReshma Pattan 40562d3216dSReshma Pattan /* 40662d3216dSReshma Pattan * Lcore ID to serive latency statistics. 40762d3216dSReshma Pattan */ 40862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1; 40962d3216dSReshma Pattan 41062d3216dSReshma Pattan #endif 41162d3216dSReshma Pattan 4127b7e5ba7SIntel /* 413af75078fSIntel * Ethernet device configuration. 414af75078fSIntel */ 415af75078fSIntel struct rte_eth_rxmode rx_mode = { 41635b2d13fSOlivier Matz .max_rx_pkt_len = RTE_ETHER_MAX_LEN, 41735b2d13fSOlivier Matz /**< Default maximum frame length. 
*/ 418af75078fSIntel }; 419af75078fSIntel 42007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = { 42107e5f7bdSShahaf Shuler .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 42207e5f7bdSShahaf Shuler }; 423fd8c20aaSShahaf Shuler 424af75078fSIntel struct rte_fdir_conf fdir_conf = { 425af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 426af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 427af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 428d9d5e6f2SJingjing Wu .mask = { 42926f579aaSWei Zhao .vlan_tci_mask = 0xFFEF, 430d9d5e6f2SJingjing Wu .ipv4_mask = { 431d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF, 432d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF, 433d9d5e6f2SJingjing Wu }, 434d9d5e6f2SJingjing Wu .ipv6_mask = { 435d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 436d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 437d9d5e6f2SJingjing Wu }, 438d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF, 439d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF, 44047b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF, 44147b3ac6bSWenzhuo Lu .tunnel_type_mask = 1, 44247b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF, 443d9d5e6f2SJingjing Wu }, 444af75078fSIntel .drop_queue = 127, 445af75078fSIntel }; 446af75078fSIntel 4472950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ 448af75078fSIntel 449ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 450ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 451ed30d9b6SIntel 452ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 453ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 454ed30d9b6SIntel 455ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 456ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 457ed30d9b6SIntel 458a4fd5eeeSElza Mathew /* 459a4fd5eeeSElza Mathew * Display zero values by default for xstats 460a4fd5eeeSElza Mathew */ 461a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 462a4fd5eeeSElza Mathew 463c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 464c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 4657acf894dSStephen Hurd 466e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE 4677e4441c8SRemy Horton /* Bitrate statistics */ 4687e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 469e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 470e25e6c70SRemy Horton uint8_t bitrate_enabled; 471e25e6c70SRemy Horton #endif 4727e4441c8SRemy Horton 473b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 474b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 475b40f8d78SJiayu Hu 4761960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = { 4771960be7dSNelio Laranjeiro .select_ipv4 = 1, 4781960be7dSNelio Laranjeiro .select_vlan = 0, 47962e8a5a8SViacheslav Ovsiienko .select_tos_ttl = 0, 4801960be7dSNelio Laranjeiro .vni = "\x00\x00\x00", 4811960be7dSNelio Laranjeiro .udp_src = 0, 4821960be7dSNelio Laranjeiro .udp_dst = RTE_BE16(4789), 4830c9da755SDavid Marchand .ipv4_src = RTE_IPV4(127, 0, 0, 1), 4840c9da755SDavid Marchand .ipv4_dst = RTE_IPV4(255, 255, 255, 255), 4851960be7dSNelio Laranjeiro .ipv6_src = 
"\x00\x00\x00\x00\x00\x00\x00\x00" 4861960be7dSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x00\x01", 4871960be7dSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 4881960be7dSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 4891960be7dSNelio Laranjeiro .vlan_tci = 0, 49062e8a5a8SViacheslav Ovsiienko .ip_tos = 0, 49162e8a5a8SViacheslav Ovsiienko .ip_ttl = 255, 4921960be7dSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 4931960be7dSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 4941960be7dSNelio Laranjeiro }; 4951960be7dSNelio Laranjeiro 496dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = { 497dcd962fcSNelio Laranjeiro .select_ipv4 = 1, 498dcd962fcSNelio Laranjeiro .select_vlan = 0, 499dcd962fcSNelio Laranjeiro .tni = "\x00\x00\x00", 5000c9da755SDavid Marchand .ipv4_src = RTE_IPV4(127, 0, 0, 1), 5010c9da755SDavid Marchand .ipv4_dst = RTE_IPV4(255, 255, 255, 255), 502dcd962fcSNelio Laranjeiro .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 503dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x00\x01", 504dcd962fcSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 505dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 506dcd962fcSNelio Laranjeiro .vlan_tci = 0, 507dcd962fcSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 508dcd962fcSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 509dcd962fcSNelio Laranjeiro }; 510dcd962fcSNelio Laranjeiro 511ed30d9b6SIntel /* Forward function declarations */ 512c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi); 51328caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 51428caa76aSZhiyong Yang struct rte_port *port); 515edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 516f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 51776ad4a2dSGaetan Rivet enum rte_eth_event_type type, 518d6af1a13SBernard Iremonger void *param, void *ret_param); 
519cc1bf307SJeff Guo static void dev_event_callback(const char *device_name, 520fb73e096SJeff Guo enum rte_dev_event_type type, 521fb73e096SJeff Guo void *param); 522ce8d5614SIntel 523ce8d5614SIntel /* 524ce8d5614SIntel * Check if all the ports are started. 525ce8d5614SIntel * If yes, return positive value. If not, return zero. 526ce8d5614SIntel */ 527ce8d5614SIntel static int all_ports_started(void); 528ed30d9b6SIntel 52952f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 53035b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; 53152f38a20SJiayu Hu 532af75078fSIntel /* 53398a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 534c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 535c9cafcc8SShahaf Shuler */ 536c9cafcc8SShahaf Shuler int 537c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 538c9cafcc8SShahaf Shuler { 539c9cafcc8SShahaf Shuler unsigned int i; 540c9cafcc8SShahaf Shuler 541c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 542c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 543c9cafcc8SShahaf Shuler return 0; 544c9cafcc8SShahaf Shuler } 545c9cafcc8SShahaf Shuler return 1; 546c9cafcc8SShahaf Shuler } 547c9cafcc8SShahaf Shuler 548c9cafcc8SShahaf Shuler /* 549af75078fSIntel * Setup default configuration. 
550af75078fSIntel */ 551af75078fSIntel static void 552af75078fSIntel set_default_fwd_lcores_config(void) 553af75078fSIntel { 554af75078fSIntel unsigned int i; 555af75078fSIntel unsigned int nb_lc; 5567acf894dSStephen Hurd unsigned int sock_num; 557af75078fSIntel 558af75078fSIntel nb_lc = 0; 559af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 560dbfb8ec7SPhil Yang if (!rte_lcore_is_enabled(i)) 561dbfb8ec7SPhil Yang continue; 562c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 563c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 564c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 565c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 566c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 567c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 568c9cafcc8SShahaf Shuler } 569c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 5707acf894dSStephen Hurd } 571f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 572f54fe5eeSStephen Hurd continue; 573f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 574af75078fSIntel } 575af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 576af75078fSIntel nb_cfg_lcores = nb_lcores; 577af75078fSIntel nb_fwd_lcores = 1; 578af75078fSIntel } 579af75078fSIntel 580af75078fSIntel static void 581af75078fSIntel set_def_peer_eth_addrs(void) 582af75078fSIntel { 583af75078fSIntel portid_t i; 584af75078fSIntel 585af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 58635b2d13fSOlivier Matz peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR; 587af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 588af75078fSIntel } 589af75078fSIntel } 590af75078fSIntel 591af75078fSIntel static void 592af75078fSIntel set_default_fwd_ports_config(void) 593af75078fSIntel { 594af75078fSIntel portid_t pt_id; 59565a7360cSMatan Azrad int i = 0; 596af75078fSIntel 597effdb8bbSPhil Yang RTE_ETH_FOREACH_DEV(pt_id) { 59865a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 599af75078fSIntel 600effdb8bbSPhil Yang /* Update sockets info 
according to the attached device */ 601effdb8bbSPhil Yang int socket_id = rte_eth_dev_socket_id(pt_id); 602effdb8bbSPhil Yang if (socket_id >= 0 && new_socket_id(socket_id)) { 603effdb8bbSPhil Yang if (num_sockets >= RTE_MAX_NUMA_NODES) { 604effdb8bbSPhil Yang rte_exit(EXIT_FAILURE, 605effdb8bbSPhil Yang "Total sockets greater than %u\n", 606effdb8bbSPhil Yang RTE_MAX_NUMA_NODES); 607effdb8bbSPhil Yang } 608effdb8bbSPhil Yang socket_ids[num_sockets++] = socket_id; 609effdb8bbSPhil Yang } 610effdb8bbSPhil Yang } 611effdb8bbSPhil Yang 612af75078fSIntel nb_cfg_ports = nb_ports; 613af75078fSIntel nb_fwd_ports = nb_ports; 614af75078fSIntel } 615af75078fSIntel 616af75078fSIntel void 617af75078fSIntel set_def_fwd_config(void) 618af75078fSIntel { 619af75078fSIntel set_default_fwd_lcores_config(); 620af75078fSIntel set_def_peer_eth_addrs(); 621af75078fSIntel set_default_fwd_ports_config(); 622af75078fSIntel } 623af75078fSIntel 624c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */ 625c7f5dba7SAnatoly Burakov static int 626c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) 627c7f5dba7SAnatoly Burakov { 628c7f5dba7SAnatoly Burakov unsigned int n_pages, mbuf_per_pg, leftover; 629c7f5dba7SAnatoly Burakov uint64_t total_mem, mbuf_mem, obj_sz; 630c7f5dba7SAnatoly Burakov 631c7f5dba7SAnatoly Burakov /* there is no good way to predict how much space the mempool will 632c7f5dba7SAnatoly Burakov * occupy because it will allocate chunks on the fly, and some of those 633c7f5dba7SAnatoly Burakov * will come from default DPDK memory while some will come from our 634c7f5dba7SAnatoly Burakov * external memory, so just assume 128MB will be enough for everyone. 
635c7f5dba7SAnatoly Burakov */ 636c7f5dba7SAnatoly Burakov uint64_t hdr_mem = 128 << 20; 637c7f5dba7SAnatoly Burakov 638c7f5dba7SAnatoly Burakov /* account for possible non-contiguousness */ 639c7f5dba7SAnatoly Burakov obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); 640c7f5dba7SAnatoly Burakov if (obj_sz > pgsz) { 641c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); 642c7f5dba7SAnatoly Burakov return -1; 643c7f5dba7SAnatoly Burakov } 644c7f5dba7SAnatoly Burakov 645c7f5dba7SAnatoly Burakov mbuf_per_pg = pgsz / obj_sz; 646c7f5dba7SAnatoly Burakov leftover = (nb_mbufs % mbuf_per_pg) > 0; 647c7f5dba7SAnatoly Burakov n_pages = (nb_mbufs / mbuf_per_pg) + leftover; 648c7f5dba7SAnatoly Burakov 649c7f5dba7SAnatoly Burakov mbuf_mem = n_pages * pgsz; 650c7f5dba7SAnatoly Burakov 651c7f5dba7SAnatoly Burakov total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); 652c7f5dba7SAnatoly Burakov 653c7f5dba7SAnatoly Burakov if (total_mem > SIZE_MAX) { 654c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Memory size too big\n"); 655c7f5dba7SAnatoly Burakov return -1; 656c7f5dba7SAnatoly Burakov } 657c7f5dba7SAnatoly Burakov *out = (size_t)total_mem; 658c7f5dba7SAnatoly Burakov 659c7f5dba7SAnatoly Burakov return 0; 660c7f5dba7SAnatoly Burakov } 661c7f5dba7SAnatoly Burakov 662c7f5dba7SAnatoly Burakov static int 663c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz) 664c7f5dba7SAnatoly Burakov { 665c7f5dba7SAnatoly Burakov /* as per mmap() manpage, all page sizes are log2 of page size 666c7f5dba7SAnatoly Burakov * shifted by MAP_HUGE_SHIFT 667c7f5dba7SAnatoly Burakov */ 6689d650537SAnatoly Burakov int log2 = rte_log2_u64(page_sz); 669c7f5dba7SAnatoly Burakov 670c7f5dba7SAnatoly Burakov return (log2 << HUGE_SHIFT); 671c7f5dba7SAnatoly Burakov } 672c7f5dba7SAnatoly Burakov 673c7f5dba7SAnatoly Burakov static void * 674c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge) 675c7f5dba7SAnatoly Burakov { 676c7f5dba7SAnatoly Burakov void *addr; 
677c7f5dba7SAnatoly Burakov int flags; 678c7f5dba7SAnatoly Burakov 679c7f5dba7SAnatoly Burakov /* allocate anonymous hugepages */ 680c7f5dba7SAnatoly Burakov flags = MAP_ANONYMOUS | MAP_PRIVATE; 681c7f5dba7SAnatoly Burakov if (huge) 682c7f5dba7SAnatoly Burakov flags |= HUGE_FLAG | pagesz_flags(pgsz); 683c7f5dba7SAnatoly Burakov 684c7f5dba7SAnatoly Burakov addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); 685c7f5dba7SAnatoly Burakov if (addr == MAP_FAILED) 686c7f5dba7SAnatoly Burakov return NULL; 687c7f5dba7SAnatoly Burakov 688c7f5dba7SAnatoly Burakov return addr; 689c7f5dba7SAnatoly Burakov } 690c7f5dba7SAnatoly Burakov 691c7f5dba7SAnatoly Burakov struct extmem_param { 692c7f5dba7SAnatoly Burakov void *addr; 693c7f5dba7SAnatoly Burakov size_t len; 694c7f5dba7SAnatoly Burakov size_t pgsz; 695c7f5dba7SAnatoly Burakov rte_iova_t *iova_table; 696c7f5dba7SAnatoly Burakov unsigned int iova_table_len; 697c7f5dba7SAnatoly Burakov }; 698c7f5dba7SAnatoly Burakov 699c7f5dba7SAnatoly Burakov static int 700c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, 701c7f5dba7SAnatoly Burakov bool huge) 702c7f5dba7SAnatoly Burakov { 703c7f5dba7SAnatoly Burakov uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ 704c7f5dba7SAnatoly Burakov RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ 705c7f5dba7SAnatoly Burakov unsigned int cur_page, n_pages, pgsz_idx; 706c7f5dba7SAnatoly Burakov size_t mem_sz, cur_pgsz; 707c7f5dba7SAnatoly Burakov rte_iova_t *iovas = NULL; 708c7f5dba7SAnatoly Burakov void *addr; 709c7f5dba7SAnatoly Burakov int ret; 710c7f5dba7SAnatoly Burakov 711c7f5dba7SAnatoly Burakov for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { 712c7f5dba7SAnatoly Burakov /* skip anything that is too big */ 713c7f5dba7SAnatoly Burakov if (pgsizes[pgsz_idx] > SIZE_MAX) 714c7f5dba7SAnatoly Burakov continue; 715c7f5dba7SAnatoly Burakov 716c7f5dba7SAnatoly Burakov cur_pgsz = pgsizes[pgsz_idx]; 
717c7f5dba7SAnatoly Burakov 718c7f5dba7SAnatoly Burakov /* if we were told not to allocate hugepages, override */ 719c7f5dba7SAnatoly Burakov if (!huge) 720c7f5dba7SAnatoly Burakov cur_pgsz = sysconf(_SC_PAGESIZE); 721c7f5dba7SAnatoly Burakov 722c7f5dba7SAnatoly Burakov ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); 723c7f5dba7SAnatoly Burakov if (ret < 0) { 724c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); 725c7f5dba7SAnatoly Burakov return -1; 726c7f5dba7SAnatoly Burakov } 727c7f5dba7SAnatoly Burakov 728c7f5dba7SAnatoly Burakov /* allocate our memory */ 729c7f5dba7SAnatoly Burakov addr = alloc_mem(mem_sz, cur_pgsz, huge); 730c7f5dba7SAnatoly Burakov 731c7f5dba7SAnatoly Burakov /* if we couldn't allocate memory with a specified page size, 732c7f5dba7SAnatoly Burakov * that doesn't mean we can't do it with other page sizes, so 733c7f5dba7SAnatoly Burakov * try another one. 734c7f5dba7SAnatoly Burakov */ 735c7f5dba7SAnatoly Burakov if (addr == NULL) 736c7f5dba7SAnatoly Burakov continue; 737c7f5dba7SAnatoly Burakov 738c7f5dba7SAnatoly Burakov /* store IOVA addresses for every page in this memory area */ 739c7f5dba7SAnatoly Burakov n_pages = mem_sz / cur_pgsz; 740c7f5dba7SAnatoly Burakov 741c7f5dba7SAnatoly Burakov iovas = malloc(sizeof(*iovas) * n_pages); 742c7f5dba7SAnatoly Burakov 743c7f5dba7SAnatoly Burakov if (iovas == NULL) { 744c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n"); 745c7f5dba7SAnatoly Burakov goto fail; 746c7f5dba7SAnatoly Burakov } 747c7f5dba7SAnatoly Burakov /* lock memory if it's not huge pages */ 748c7f5dba7SAnatoly Burakov if (!huge) 749c7f5dba7SAnatoly Burakov mlock(addr, mem_sz); 750c7f5dba7SAnatoly Burakov 751c7f5dba7SAnatoly Burakov /* populate IOVA addresses */ 752c7f5dba7SAnatoly Burakov for (cur_page = 0; cur_page < n_pages; cur_page++) { 753c7f5dba7SAnatoly Burakov rte_iova_t iova; 754c7f5dba7SAnatoly Burakov size_t offset; 755c7f5dba7SAnatoly Burakov void 
*cur; 756c7f5dba7SAnatoly Burakov 757c7f5dba7SAnatoly Burakov offset = cur_pgsz * cur_page; 758c7f5dba7SAnatoly Burakov cur = RTE_PTR_ADD(addr, offset); 759c7f5dba7SAnatoly Burakov 760c7f5dba7SAnatoly Burakov /* touch the page before getting its IOVA */ 761c7f5dba7SAnatoly Burakov *(volatile char *)cur = 0; 762c7f5dba7SAnatoly Burakov 763c7f5dba7SAnatoly Burakov iova = rte_mem_virt2iova(cur); 764c7f5dba7SAnatoly Burakov 765c7f5dba7SAnatoly Burakov iovas[cur_page] = iova; 766c7f5dba7SAnatoly Burakov } 767c7f5dba7SAnatoly Burakov 768c7f5dba7SAnatoly Burakov break; 769c7f5dba7SAnatoly Burakov } 770c7f5dba7SAnatoly Burakov /* if we couldn't allocate anything */ 771c7f5dba7SAnatoly Burakov if (iovas == NULL) 772c7f5dba7SAnatoly Burakov return -1; 773c7f5dba7SAnatoly Burakov 774c7f5dba7SAnatoly Burakov param->addr = addr; 775c7f5dba7SAnatoly Burakov param->len = mem_sz; 776c7f5dba7SAnatoly Burakov param->pgsz = cur_pgsz; 777c7f5dba7SAnatoly Burakov param->iova_table = iovas; 778c7f5dba7SAnatoly Burakov param->iova_table_len = n_pages; 779c7f5dba7SAnatoly Burakov 780c7f5dba7SAnatoly Burakov return 0; 781c7f5dba7SAnatoly Burakov fail: 782c7f5dba7SAnatoly Burakov if (iovas) 783c7f5dba7SAnatoly Burakov free(iovas); 784c7f5dba7SAnatoly Burakov if (addr) 785c7f5dba7SAnatoly Burakov munmap(addr, mem_sz); 786c7f5dba7SAnatoly Burakov 787c7f5dba7SAnatoly Burakov return -1; 788c7f5dba7SAnatoly Burakov } 789c7f5dba7SAnatoly Burakov 790c7f5dba7SAnatoly Burakov static int 791c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge) 792c7f5dba7SAnatoly Burakov { 793c7f5dba7SAnatoly Burakov struct extmem_param param; 794c7f5dba7SAnatoly Burakov int socket_id, ret; 795c7f5dba7SAnatoly Burakov 796c7f5dba7SAnatoly Burakov memset(¶m, 0, sizeof(param)); 797c7f5dba7SAnatoly Burakov 798c7f5dba7SAnatoly Burakov /* check if our heap exists */ 799c7f5dba7SAnatoly Burakov socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 800c7f5dba7SAnatoly Burakov if (socket_id 
< 0) { 801c7f5dba7SAnatoly Burakov /* create our heap */ 802c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME); 803c7f5dba7SAnatoly Burakov if (ret < 0) { 804c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create heap\n"); 805c7f5dba7SAnatoly Burakov return -1; 806c7f5dba7SAnatoly Burakov } 807c7f5dba7SAnatoly Burakov } 808c7f5dba7SAnatoly Burakov 809c7f5dba7SAnatoly Burakov ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge); 810c7f5dba7SAnatoly Burakov if (ret < 0) { 811c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create memory area\n"); 812c7f5dba7SAnatoly Burakov return -1; 813c7f5dba7SAnatoly Burakov } 814c7f5dba7SAnatoly Burakov 815c7f5dba7SAnatoly Burakov /* we now have a valid memory area, so add it to heap */ 816c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME, 817c7f5dba7SAnatoly Burakov param.addr, param.len, param.iova_table, 818c7f5dba7SAnatoly Burakov param.iova_table_len, param.pgsz); 819c7f5dba7SAnatoly Burakov 820c7f5dba7SAnatoly Burakov /* when using VFIO, memory is automatically mapped for DMA by EAL */ 821c7f5dba7SAnatoly Burakov 822c7f5dba7SAnatoly Burakov /* not needed any more */ 823c7f5dba7SAnatoly Burakov free(param.iova_table); 824c7f5dba7SAnatoly Burakov 825c7f5dba7SAnatoly Burakov if (ret < 0) { 826c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot add memory to heap\n"); 827c7f5dba7SAnatoly Burakov munmap(param.addr, param.len); 828c7f5dba7SAnatoly Burakov return -1; 829c7f5dba7SAnatoly Burakov } 830c7f5dba7SAnatoly Burakov 831c7f5dba7SAnatoly Burakov /* success */ 832c7f5dba7SAnatoly Burakov 833c7f5dba7SAnatoly Burakov TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n", 834c7f5dba7SAnatoly Burakov param.len >> 20); 835c7f5dba7SAnatoly Burakov 836c7f5dba7SAnatoly Burakov return 0; 837c7f5dba7SAnatoly Burakov } 8383a0968c8SShahaf Shuler static void 8393a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, 8403a0968c8SShahaf Shuler struct 
rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) 8413a0968c8SShahaf Shuler { 8423a0968c8SShahaf Shuler uint16_t pid = 0; 8433a0968c8SShahaf Shuler int ret; 8443a0968c8SShahaf Shuler 8453a0968c8SShahaf Shuler RTE_ETH_FOREACH_DEV(pid) { 8463a0968c8SShahaf Shuler struct rte_eth_dev *dev = 8473a0968c8SShahaf Shuler &rte_eth_devices[pid]; 8483a0968c8SShahaf Shuler 8493a0968c8SShahaf Shuler ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0, 8503a0968c8SShahaf Shuler memhdr->len); 8513a0968c8SShahaf Shuler if (ret) { 8523a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 8533a0968c8SShahaf Shuler "unable to DMA unmap addr 0x%p " 8543a0968c8SShahaf Shuler "for device %s\n", 8553a0968c8SShahaf Shuler memhdr->addr, dev->data->name); 8563a0968c8SShahaf Shuler } 8573a0968c8SShahaf Shuler } 8583a0968c8SShahaf Shuler ret = rte_extmem_unregister(memhdr->addr, memhdr->len); 8593a0968c8SShahaf Shuler if (ret) { 8603a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 8613a0968c8SShahaf Shuler "unable to un-register addr 0x%p\n", memhdr->addr); 8623a0968c8SShahaf Shuler } 8633a0968c8SShahaf Shuler } 8643a0968c8SShahaf Shuler 8653a0968c8SShahaf Shuler static void 8663a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, 8673a0968c8SShahaf Shuler struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) 8683a0968c8SShahaf Shuler { 8693a0968c8SShahaf Shuler uint16_t pid = 0; 8703a0968c8SShahaf Shuler size_t page_size = sysconf(_SC_PAGESIZE); 8713a0968c8SShahaf Shuler int ret; 8723a0968c8SShahaf Shuler 8733a0968c8SShahaf Shuler ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0, 8743a0968c8SShahaf Shuler page_size); 8753a0968c8SShahaf Shuler if (ret) { 8763a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 8773a0968c8SShahaf Shuler "unable to register addr 0x%p\n", memhdr->addr); 8783a0968c8SShahaf Shuler return; 8793a0968c8SShahaf Shuler } 8803a0968c8SShahaf Shuler RTE_ETH_FOREACH_DEV(pid) { 8813a0968c8SShahaf Shuler struct rte_eth_dev *dev = 
8823a0968c8SShahaf Shuler &rte_eth_devices[pid]; 8833a0968c8SShahaf Shuler 8843a0968c8SShahaf Shuler ret = rte_dev_dma_map(dev->device, memhdr->addr, 0, 8853a0968c8SShahaf Shuler memhdr->len); 8863a0968c8SShahaf Shuler if (ret) { 8873a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 8883a0968c8SShahaf Shuler "unable to DMA map addr 0x%p " 8893a0968c8SShahaf Shuler "for device %s\n", 8903a0968c8SShahaf Shuler memhdr->addr, dev->data->name); 8913a0968c8SShahaf Shuler } 8923a0968c8SShahaf Shuler } 8933a0968c8SShahaf Shuler } 894c7f5dba7SAnatoly Burakov 895af75078fSIntel /* 896af75078fSIntel * Configuration initialisation done once at init time. 897af75078fSIntel */ 898401b744dSShahaf Shuler static struct rte_mempool * 899af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 900af75078fSIntel unsigned int socket_id) 901af75078fSIntel { 902af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 903bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 904af75078fSIntel uint32_t mb_size; 905af75078fSIntel 906dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 907af75078fSIntel mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); 908148f963fSBruce Richardson 909285fd101SOlivier Matz TESTPMD_LOG(INFO, 910d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 911d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 912d1eb542eSOlivier Matz 913c7f5dba7SAnatoly Burakov switch (mp_alloc_type) { 914c7f5dba7SAnatoly Burakov case MP_ALLOC_NATIVE: 915c7f5dba7SAnatoly Burakov { 916c7f5dba7SAnatoly Burakov /* wrapper to rte_mempool_create() */ 917c7f5dba7SAnatoly Burakov TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 918c7f5dba7SAnatoly Burakov rte_mbuf_best_mempool_ops()); 919c7f5dba7SAnatoly Burakov rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 920c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, socket_id); 921c7f5dba7SAnatoly Burakov break; 922c7f5dba7SAnatoly 
Burakov } 923c7f5dba7SAnatoly Burakov case MP_ALLOC_ANON: 924c7f5dba7SAnatoly Burakov { 925b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 926c7f5dba7SAnatoly Burakov mb_size, (unsigned int) mb_mempool_cache, 927148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 92859fcf854SShahaf Shuler socket_id, mempool_flags); 92924427bb9SOlivier Matz if (rte_mp == NULL) 93024427bb9SOlivier Matz goto err; 931b19a0c75SOlivier Matz 932b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 933b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 934b19a0c75SOlivier Matz rte_mp = NULL; 93524427bb9SOlivier Matz goto err; 936b19a0c75SOlivier Matz } 937b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 938b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 9393a0968c8SShahaf Shuler rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL); 940c7f5dba7SAnatoly Burakov break; 941c7f5dba7SAnatoly Burakov } 942c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM: 943c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM_HUGE: 944c7f5dba7SAnatoly Burakov { 945c7f5dba7SAnatoly Burakov int heap_socket; 946c7f5dba7SAnatoly Burakov bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; 947c7f5dba7SAnatoly Burakov 948c7f5dba7SAnatoly Burakov if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) 949c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not create external memory\n"); 950c7f5dba7SAnatoly Burakov 951c7f5dba7SAnatoly Burakov heap_socket = 952c7f5dba7SAnatoly Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 953c7f5dba7SAnatoly Burakov if (heap_socket < 0) 954c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 955c7f5dba7SAnatoly Burakov 9560e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 9570e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 958ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 959c7f5dba7SAnatoly Burakov 
mb_mempool_cache, 0, mbuf_seg_size, 960c7f5dba7SAnatoly Burakov heap_socket); 961c7f5dba7SAnatoly Burakov break; 962c7f5dba7SAnatoly Burakov } 963c7f5dba7SAnatoly Burakov default: 964c7f5dba7SAnatoly Burakov { 965c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 966c7f5dba7SAnatoly Burakov } 967bece7b6cSChristian Ehrhardt } 968148f963fSBruce Richardson 96924427bb9SOlivier Matz err: 970af75078fSIntel if (rte_mp == NULL) { 971d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 972d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 973d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 974148f963fSBruce Richardson } else if (verbose_level > 0) { 975591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 976af75078fSIntel } 977401b744dSShahaf Shuler return rte_mp; 978af75078fSIntel } 979af75078fSIntel 98020a0286fSLiu Xiaofeng /* 98120a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 98220a0286fSLiu Xiaofeng * if valid, return 0, else return -1 98320a0286fSLiu Xiaofeng */ 98420a0286fSLiu Xiaofeng static int 98520a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 98620a0286fSLiu Xiaofeng { 98720a0286fSLiu Xiaofeng static int warning_once = 0; 98820a0286fSLiu Xiaofeng 989c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 99020a0286fSLiu Xiaofeng if (!warning_once && numa_support) 99120a0286fSLiu Xiaofeng printf("Warning: NUMA should be configured manually by" 99220a0286fSLiu Xiaofeng " using --port-numa-config and" 99320a0286fSLiu Xiaofeng " --ring-numa-config parameters along with" 99420a0286fSLiu Xiaofeng " --numa.\n"); 99520a0286fSLiu Xiaofeng warning_once = 1; 99620a0286fSLiu Xiaofeng return -1; 99720a0286fSLiu Xiaofeng } 99820a0286fSLiu Xiaofeng return 0; 99920a0286fSLiu Xiaofeng } 100020a0286fSLiu Xiaofeng 10013f7311baSWei Dai /* 10023f7311baSWei Dai * Get the allowed maximum number of RX queues. 
10033f7311baSWei Dai * *pid return the port id which has minimal value of 10043f7311baSWei Dai * max_rx_queues in all ports. 10053f7311baSWei Dai */ 10063f7311baSWei Dai queueid_t 10073f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 10083f7311baSWei Dai { 10093f7311baSWei Dai queueid_t allowed_max_rxq = MAX_QUEUE_ID; 10103f7311baSWei Dai portid_t pi; 10113f7311baSWei Dai struct rte_eth_dev_info dev_info; 10123f7311baSWei Dai 10133f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 10143f7311baSWei Dai rte_eth_dev_info_get(pi, &dev_info); 10153f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 10163f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 10173f7311baSWei Dai *pid = pi; 10183f7311baSWei Dai } 10193f7311baSWei Dai } 10203f7311baSWei Dai return allowed_max_rxq; 10213f7311baSWei Dai } 10223f7311baSWei Dai 10233f7311baSWei Dai /* 10243f7311baSWei Dai * Check input rxq is valid or not. 10253f7311baSWei Dai * If input rxq is not greater than any of maximum number 10263f7311baSWei Dai * of RX queues of all ports, it is valid. 10273f7311baSWei Dai * if valid, return 0, else return -1 10283f7311baSWei Dai */ 10293f7311baSWei Dai int 10303f7311baSWei Dai check_nb_rxq(queueid_t rxq) 10313f7311baSWei Dai { 10323f7311baSWei Dai queueid_t allowed_max_rxq; 10333f7311baSWei Dai portid_t pid = 0; 10343f7311baSWei Dai 10353f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 10363f7311baSWei Dai if (rxq > allowed_max_rxq) { 10373f7311baSWei Dai printf("Fail: input rxq (%u) can't be greater " 10383f7311baSWei Dai "than max_rx_queues (%u) of port %u\n", 10393f7311baSWei Dai rxq, 10403f7311baSWei Dai allowed_max_rxq, 10413f7311baSWei Dai pid); 10423f7311baSWei Dai return -1; 10433f7311baSWei Dai } 10443f7311baSWei Dai return 0; 10453f7311baSWei Dai } 10463f7311baSWei Dai 104736db4f6cSWei Dai /* 104836db4f6cSWei Dai * Get the allowed maximum number of TX queues. 
104936db4f6cSWei Dai * *pid return the port id which has minimal value of 105036db4f6cSWei Dai * max_tx_queues in all ports. 105136db4f6cSWei Dai */ 105236db4f6cSWei Dai queueid_t 105336db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 105436db4f6cSWei Dai { 105536db4f6cSWei Dai queueid_t allowed_max_txq = MAX_QUEUE_ID; 105636db4f6cSWei Dai portid_t pi; 105736db4f6cSWei Dai struct rte_eth_dev_info dev_info; 105836db4f6cSWei Dai 105936db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 106036db4f6cSWei Dai rte_eth_dev_info_get(pi, &dev_info); 106136db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 106236db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 106336db4f6cSWei Dai *pid = pi; 106436db4f6cSWei Dai } 106536db4f6cSWei Dai } 106636db4f6cSWei Dai return allowed_max_txq; 106736db4f6cSWei Dai } 106836db4f6cSWei Dai 106936db4f6cSWei Dai /* 107036db4f6cSWei Dai * Check input txq is valid or not. 107136db4f6cSWei Dai * If input txq is not greater than any of maximum number 107236db4f6cSWei Dai * of TX queues of all ports, it is valid. 
107336db4f6cSWei Dai * if valid, return 0, else return -1 107436db4f6cSWei Dai */ 107536db4f6cSWei Dai int 107636db4f6cSWei Dai check_nb_txq(queueid_t txq) 107736db4f6cSWei Dai { 107836db4f6cSWei Dai queueid_t allowed_max_txq; 107936db4f6cSWei Dai portid_t pid = 0; 108036db4f6cSWei Dai 108136db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 108236db4f6cSWei Dai if (txq > allowed_max_txq) { 108336db4f6cSWei Dai printf("Fail: input txq (%u) can't be greater " 108436db4f6cSWei Dai "than max_tx_queues (%u) of port %u\n", 108536db4f6cSWei Dai txq, 108636db4f6cSWei Dai allowed_max_txq, 108736db4f6cSWei Dai pid); 108836db4f6cSWei Dai return -1; 108936db4f6cSWei Dai } 109036db4f6cSWei Dai return 0; 109136db4f6cSWei Dai } 109236db4f6cSWei Dai 1093af75078fSIntel static void 1094af75078fSIntel init_config(void) 1095af75078fSIntel { 1096ce8d5614SIntel portid_t pid; 1097af75078fSIntel struct rte_port *port; 1098af75078fSIntel struct rte_mempool *mbp; 1099af75078fSIntel unsigned int nb_mbuf_per_pool; 1100af75078fSIntel lcoreid_t lc_id; 11017acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 1102b7091f1dSJiayu Hu struct rte_gro_param gro_param; 110352f38a20SJiayu Hu uint32_t gso_types; 1104*33f9630fSSunil Kumar Kori uint16_t data_size; 1105*33f9630fSSunil Kumar Kori bool warning = 0; 1106c73a9071SWei Dai int k; 1107af75078fSIntel 11087acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 1109487f9a59SYulong Pei 1110af75078fSIntel /* Configuration of logical cores. 
*/ 1111af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1112af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1113fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1114af75078fSIntel if (fwd_lcores == NULL) { 1115ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1116ce8d5614SIntel "failed\n", nb_lcores); 1117af75078fSIntel } 1118af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1119af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1120af75078fSIntel sizeof(struct fwd_lcore), 1121fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1122af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1123ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1124ce8d5614SIntel "failed\n"); 1125af75078fSIntel } 1126af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1127af75078fSIntel } 1128af75078fSIntel 11297d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1130ce8d5614SIntel port = &ports[pid]; 11318b9bd0efSMoti Haimovsky /* Apply default TxRx configuration for all ports */ 1132fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 1133384161e0SShahaf Shuler port->dev_conf.rxmode = rx_mode; 1134ce8d5614SIntel rte_eth_dev_info_get(pid, &port->dev_info); 11357c45f6c0SFerruh Yigit 113607e5f7bdSShahaf Shuler if (!(port->dev_info.tx_offload_capa & 113707e5f7bdSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 113807e5f7bdSShahaf Shuler port->dev_conf.txmode.offloads &= 113907e5f7bdSShahaf Shuler ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 1140c18feafaSDekel Peled if (!(port->dev_info.tx_offload_capa & 1141c18feafaSDekel Peled DEV_TX_OFFLOAD_MATCH_METADATA)) 1142c18feafaSDekel Peled port->dev_conf.txmode.offloads &= 1143c18feafaSDekel Peled ~DEV_TX_OFFLOAD_MATCH_METADATA; 1144b6ea6408SIntel if (numa_support) { 1145b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 1146b6ea6408SIntel port_per_socket[port_numa[pid]]++; 1147b6ea6408SIntel else { 1148b6ea6408SIntel uint32_t socket_id = 
rte_eth_dev_socket_id(pid); 114920a0286fSLiu Xiaofeng 115029841336SPhil Yang /* 115129841336SPhil Yang * if socket_id is invalid, 115229841336SPhil Yang * set to the first available socket. 115329841336SPhil Yang */ 115420a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 115529841336SPhil Yang socket_id = socket_ids[0]; 1156b6ea6408SIntel port_per_socket[socket_id]++; 1157b6ea6408SIntel } 1158b6ea6408SIntel } 1159b6ea6408SIntel 1160c73a9071SWei Dai /* Apply Rx offloads configuration */ 1161c73a9071SWei Dai for (k = 0; k < port->dev_info.max_rx_queues; k++) 1162c73a9071SWei Dai port->rx_conf[k].offloads = 1163c73a9071SWei Dai port->dev_conf.rxmode.offloads; 1164c73a9071SWei Dai /* Apply Tx offloads configuration */ 1165c73a9071SWei Dai for (k = 0; k < port->dev_info.max_tx_queues; k++) 1166c73a9071SWei Dai port->tx_conf[k].offloads = 1167c73a9071SWei Dai port->dev_conf.txmode.offloads; 1168c73a9071SWei Dai 1169ce8d5614SIntel /* set flag to initialize port/queue */ 1170ce8d5614SIntel port->need_reconfig = 1; 1171ce8d5614SIntel port->need_reconfig_queues = 1; 1172c18feafaSDekel Peled port->tx_metadata = 0; 1173*33f9630fSSunil Kumar Kori 1174*33f9630fSSunil Kumar Kori /* Check for maximum number of segments per MTU. Accordingly 1175*33f9630fSSunil Kumar Kori * update the mbuf data size. 
1176*33f9630fSSunil Kumar Kori */ 1177*33f9630fSSunil Kumar Kori if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX) { 1178*33f9630fSSunil Kumar Kori data_size = rx_mode.max_rx_pkt_len / 1179*33f9630fSSunil Kumar Kori port->dev_info.rx_desc_lim.nb_mtu_seg_max; 1180*33f9630fSSunil Kumar Kori 1181*33f9630fSSunil Kumar Kori if ((data_size + RTE_PKTMBUF_HEADROOM) > 1182*33f9630fSSunil Kumar Kori mbuf_data_size) { 1183*33f9630fSSunil Kumar Kori mbuf_data_size = data_size + 1184*33f9630fSSunil Kumar Kori RTE_PKTMBUF_HEADROOM; 1185*33f9630fSSunil Kumar Kori warning = 1; 1186ce8d5614SIntel } 1187*33f9630fSSunil Kumar Kori } 1188*33f9630fSSunil Kumar Kori } 1189*33f9630fSSunil Kumar Kori 1190*33f9630fSSunil Kumar Kori if (warning) 1191*33f9630fSSunil Kumar Kori TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n", 1192*33f9630fSSunil Kumar Kori mbuf_data_size); 1193ce8d5614SIntel 11943ab64341SOlivier Matz /* 11953ab64341SOlivier Matz * Create pools of mbuf. 11963ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 11973ab64341SOlivier Matz * socket 0 memory by default. 11983ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 11993ab64341SOlivier Matz * 12003ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 12013ab64341SOlivier Matz * nb_txd can be configured at run time. 
12023ab64341SOlivier Matz */ 12033ab64341SOlivier Matz if (param_total_num_mbufs) 12043ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 12053ab64341SOlivier Matz else { 12063ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 12073ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 12083ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 12093ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 12103ab64341SOlivier Matz } 12113ab64341SOlivier Matz 1212b6ea6408SIntel if (numa_support) { 1213b6ea6408SIntel uint8_t i; 1214ce8d5614SIntel 1215c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 1216401b744dSShahaf Shuler mempools[i] = mbuf_pool_create(mbuf_data_size, 1217401b744dSShahaf Shuler nb_mbuf_per_pool, 1218c9cafcc8SShahaf Shuler socket_ids[i]); 12193ab64341SOlivier Matz } else { 12203ab64341SOlivier Matz if (socket_num == UMA_NO_CONFIG) 1221401b744dSShahaf Shuler mempools[0] = mbuf_pool_create(mbuf_data_size, 1222401b744dSShahaf Shuler nb_mbuf_per_pool, 0); 12233ab64341SOlivier Matz else 1224401b744dSShahaf Shuler mempools[socket_num] = mbuf_pool_create 1225401b744dSShahaf Shuler (mbuf_data_size, 1226401b744dSShahaf Shuler nb_mbuf_per_pool, 12273ab64341SOlivier Matz socket_num); 12283ab64341SOlivier Matz } 1229b6ea6408SIntel 1230b6ea6408SIntel init_port_config(); 12315886ae07SAdrien Mazarguil 123252f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1233aaacd052SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 12345886ae07SAdrien Mazarguil /* 12355886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
12365886ae07SAdrien Mazarguil */ 12375886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 12388fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 12398fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 12408fd8bebcSAdrien Mazarguil 12415886ae07SAdrien Mazarguil if (mbp == NULL) 12425886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 12435886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 124452f38a20SJiayu Hu /* initialize GSO context */ 124552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 124652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 124752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 124835b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 124935b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 125052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 12515886ae07SAdrien Mazarguil } 12525886ae07SAdrien Mazarguil 1253ce8d5614SIntel /* Configuration of packet forwarding streams. 
*/ 1254ce8d5614SIntel if (init_fwd_streams() < 0) 1255ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 12560c0db76fSBernard Iremonger 12570c0db76fSBernard Iremonger fwd_config_setup(); 1258b7091f1dSJiayu Hu 1259b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1260b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1261b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1262b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1263b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1264b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1265b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1266b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1267b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1268b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1269b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1270b7091f1dSJiayu Hu } 1271b7091f1dSJiayu Hu } 12720ad778b3SJasvinder Singh 12730ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC 12740ad778b3SJasvinder Singh if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { 12750ad778b3SJasvinder Singh RTE_ETH_FOREACH_DEV(pid) { 12760ad778b3SJasvinder Singh port = &ports[pid]; 12770ad778b3SJasvinder Singh const char *driver = port->dev_info.driver_name; 12780ad778b3SJasvinder Singh 12790ad778b3SJasvinder Singh if (strcmp(driver, "net_softnic") == 0) 12800ad778b3SJasvinder Singh port->softport.fwd_lcore_arg = fwd_lcores; 12810ad778b3SJasvinder Singh } 12820ad778b3SJasvinder Singh } 12830ad778b3SJasvinder Singh #endif 12840ad778b3SJasvinder Singh 1285ce8d5614SIntel } 1286ce8d5614SIntel 12872950a769SDeclan Doherty 12882950a769SDeclan Doherty void 1289a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 12902950a769SDeclan Doherty { 12912950a769SDeclan Doherty struct rte_port *port; 12922950a769SDeclan Doherty 12932950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
*/ 12942950a769SDeclan Doherty port = &ports[new_port_id]; 12952950a769SDeclan Doherty rte_eth_dev_info_get(new_port_id, &port->dev_info); 12962950a769SDeclan Doherty 12972950a769SDeclan Doherty /* set flag to initialize port/queue */ 12982950a769SDeclan Doherty port->need_reconfig = 1; 12992950a769SDeclan Doherty port->need_reconfig_queues = 1; 1300a21d5a4bSDeclan Doherty port->socket_id = socket_id; 13012950a769SDeclan Doherty 13022950a769SDeclan Doherty init_port_config(); 13032950a769SDeclan Doherty } 13042950a769SDeclan Doherty 13052950a769SDeclan Doherty 1306ce8d5614SIntel int 1307ce8d5614SIntel init_fwd_streams(void) 1308ce8d5614SIntel { 1309ce8d5614SIntel portid_t pid; 1310ce8d5614SIntel struct rte_port *port; 1311ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 13125a8fb55cSReshma Pattan queueid_t q; 1313ce8d5614SIntel 1314ce8d5614SIntel /* set socket id according to numa or not */ 13157d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1316ce8d5614SIntel port = &ports[pid]; 1317ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 1318ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 1319ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 1320ce8d5614SIntel port->dev_info.max_rx_queues); 1321ce8d5614SIntel return -1; 1322ce8d5614SIntel } 1323ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 1324ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 1325ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 1326ce8d5614SIntel port->dev_info.max_tx_queues); 1327ce8d5614SIntel return -1; 1328ce8d5614SIntel } 132920a0286fSLiu Xiaofeng if (numa_support) { 133020a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 133120a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 133220a0286fSLiu Xiaofeng else { 1333b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 133420a0286fSLiu Xiaofeng 133529841336SPhil Yang /* 133629841336SPhil Yang * if socket_id is invalid, 133729841336SPhil Yang * set to the first available socket. 
133829841336SPhil Yang */ 133920a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 134029841336SPhil Yang port->socket_id = socket_ids[0]; 134120a0286fSLiu Xiaofeng } 134220a0286fSLiu Xiaofeng } 1343b6ea6408SIntel else { 1344b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1345af75078fSIntel port->socket_id = 0; 1346b6ea6408SIntel else 1347b6ea6408SIntel port->socket_id = socket_num; 1348b6ea6408SIntel } 1349af75078fSIntel } 1350af75078fSIntel 13515a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 13525a8fb55cSReshma Pattan if (q == 0) { 13535a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 13545a8fb55cSReshma Pattan return -1; 13555a8fb55cSReshma Pattan } 13565a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1357ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1358ce8d5614SIntel return 0; 1359ce8d5614SIntel /* clear the old */ 1360ce8d5614SIntel if (fwd_streams != NULL) { 1361ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1362ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1363ce8d5614SIntel continue; 1364ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1365ce8d5614SIntel fwd_streams[sm_id] = NULL; 1366af75078fSIntel } 1367ce8d5614SIntel rte_free(fwd_streams); 1368ce8d5614SIntel fwd_streams = NULL; 1369ce8d5614SIntel } 1370ce8d5614SIntel 1371ce8d5614SIntel /* init new */ 1372ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 13731f84c469SMatan Azrad if (nb_fwd_streams) { 1374ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 13751f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 13761f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1377ce8d5614SIntel if (fwd_streams == NULL) 13781f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 13791f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 13801f84c469SMatan Azrad nb_fwd_streams); 1381ce8d5614SIntel 1382af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 
13831f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 13841f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 13851f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1386ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 13871f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 13881f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 13891f84c469SMatan Azrad } 1390af75078fSIntel } 1391ce8d5614SIntel 1392ce8d5614SIntel return 0; 1393af75078fSIntel } 1394af75078fSIntel 1395af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1396af75078fSIntel static void 1397af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1398af75078fSIntel { 1399af75078fSIntel unsigned int total_burst; 1400af75078fSIntel unsigned int nb_burst; 1401af75078fSIntel unsigned int burst_stats[3]; 1402af75078fSIntel uint16_t pktnb_stats[3]; 1403af75078fSIntel uint16_t nb_pkt; 1404af75078fSIntel int burst_percent[3]; 1405af75078fSIntel 1406af75078fSIntel /* 1407af75078fSIntel * First compute the total number of packet bursts and the 1408af75078fSIntel * two highest numbers of bursts of the same number of packets. 
1409af75078fSIntel */ 1410af75078fSIntel total_burst = 0; 1411af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 1412af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 1413af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1414af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 1415af75078fSIntel if (nb_burst == 0) 1416af75078fSIntel continue; 1417af75078fSIntel total_burst += nb_burst; 1418af75078fSIntel if (nb_burst > burst_stats[0]) { 1419af75078fSIntel burst_stats[1] = burst_stats[0]; 1420af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 1421af75078fSIntel burst_stats[0] = nb_burst; 1422af75078fSIntel pktnb_stats[0] = nb_pkt; 1423fe613657SDaniel Shelepov } else if (nb_burst > burst_stats[1]) { 1424fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1425fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 1426af75078fSIntel } 1427af75078fSIntel } 1428af75078fSIntel if (total_burst == 0) 1429af75078fSIntel return; 1430af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 1431af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 1432af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 1433af75078fSIntel if (burst_stats[0] == total_burst) { 1434af75078fSIntel printf("]\n"); 1435af75078fSIntel return; 1436af75078fSIntel } 1437af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 1438af75078fSIntel printf(" + %d%% of %d pkts]\n", 1439af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 1440af75078fSIntel return; 1441af75078fSIntel } 1442af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 1443af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 1444af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 1445af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 1446af75078fSIntel return; 1447af75078fSIntel } 1448af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 
1449af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 1450af75078fSIntel } 1451af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 1452af75078fSIntel 1453af75078fSIntel static void 1454af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1455af75078fSIntel { 1456af75078fSIntel struct fwd_stream *fs; 1457af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1458af75078fSIntel 1459af75078fSIntel fs = fwd_streams[stream_id]; 1460af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1461af75078fSIntel (fs->fwd_dropped == 0)) 1462af75078fSIntel return; 1463af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1464af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1465af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1466af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1467c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1468c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1469af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1470af75078fSIntel 1471af75078fSIntel /* if checksum mode */ 1472af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1473c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1474c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1475c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 147658d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 147758d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 147894d65546SDavid Marchand } else { 147994d65546SDavid Marchand printf("\n"); 1480af75078fSIntel } 1481af75078fSIntel 1482af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1483af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1484af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1485af75078fSIntel #endif 1486af75078fSIntel } 1487af75078fSIntel 148853324971SDavid Marchand void 148953324971SDavid Marchand 
fwd_stats_display(void) 149053324971SDavid Marchand { 149153324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 149253324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 149353324971SDavid Marchand struct { 149453324971SDavid Marchand struct fwd_stream *rx_stream; 149553324971SDavid Marchand struct fwd_stream *tx_stream; 149653324971SDavid Marchand uint64_t tx_dropped; 149753324971SDavid Marchand uint64_t rx_bad_ip_csum; 149853324971SDavid Marchand uint64_t rx_bad_l4_csum; 149953324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 150053324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 150153324971SDavid Marchand uint64_t total_rx_dropped = 0; 150253324971SDavid Marchand uint64_t total_tx_dropped = 0; 150353324971SDavid Marchand uint64_t total_rx_nombuf = 0; 150453324971SDavid Marchand struct rte_eth_stats stats; 150553324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 150653324971SDavid Marchand uint64_t fwd_cycles = 0; 150753324971SDavid Marchand #endif 150853324971SDavid Marchand uint64_t total_recv = 0; 150953324971SDavid Marchand uint64_t total_xmit = 0; 151053324971SDavid Marchand struct rte_port *port; 151153324971SDavid Marchand streamid_t sm_id; 151253324971SDavid Marchand portid_t pt_id; 151353324971SDavid Marchand int i; 151453324971SDavid Marchand 151553324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 151653324971SDavid Marchand 151753324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 151853324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 151953324971SDavid Marchand 152053324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 152153324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 152253324971SDavid Marchand fwd_stream_stats_display(sm_id); 152353324971SDavid Marchand } else { 152453324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 152553324971SDavid Marchand 
ports_stats[fs->rx_port].rx_stream = fs; 152653324971SDavid Marchand } 152753324971SDavid Marchand 152853324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 152953324971SDavid Marchand 153053324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 153153324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 153253324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 153353324971SDavid Marchand fs->rx_bad_outer_l4_csum; 153453324971SDavid Marchand 153553324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 153653324971SDavid Marchand fwd_cycles += fs->core_cycles; 153753324971SDavid Marchand #endif 153853324971SDavid Marchand } 153953324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 154053324971SDavid Marchand uint8_t j; 154153324971SDavid Marchand 154253324971SDavid Marchand pt_id = fwd_ports_ids[i]; 154353324971SDavid Marchand port = &ports[pt_id]; 154453324971SDavid Marchand 154553324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 154653324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 154753324971SDavid Marchand stats.opackets -= port->stats.opackets; 154853324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 154953324971SDavid Marchand stats.obytes -= port->stats.obytes; 155053324971SDavid Marchand stats.imissed -= port->stats.imissed; 155153324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 155253324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 155353324971SDavid Marchand 155453324971SDavid Marchand total_recv += stats.ipackets; 155553324971SDavid Marchand total_xmit += stats.opackets; 155653324971SDavid Marchand total_rx_dropped += stats.imissed; 155753324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 155853324971SDavid Marchand total_tx_dropped += stats.oerrors; 155953324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 156053324971SDavid Marchand 
156153324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 156253324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 156353324971SDavid Marchand 156453324971SDavid Marchand if (!port->rx_queue_stats_mapping_enabled && 156553324971SDavid Marchand !port->tx_queue_stats_mapping_enabled) { 156653324971SDavid Marchand printf(" RX-packets: %-14"PRIu64 156753324971SDavid Marchand " RX-dropped: %-14"PRIu64 156853324971SDavid Marchand "RX-total: %-"PRIu64"\n", 156953324971SDavid Marchand stats.ipackets, stats.imissed, 157053324971SDavid Marchand stats.ipackets + stats.imissed); 157153324971SDavid Marchand 157253324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 157353324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 157453324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 157553324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 157653324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 157753324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 157853324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 157953324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 158053324971SDavid Marchand printf(" RX-error: %-"PRIu64"\n", 158153324971SDavid Marchand stats.ierrors); 158253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", 158353324971SDavid Marchand stats.rx_nombuf); 158453324971SDavid Marchand } 158553324971SDavid Marchand 158653324971SDavid Marchand printf(" TX-packets: %-14"PRIu64 158753324971SDavid Marchand " TX-dropped: %-14"PRIu64 158853324971SDavid Marchand "TX-total: %-"PRIu64"\n", 158953324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 159053324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 159153324971SDavid Marchand } else { 159253324971SDavid Marchand printf(" RX-packets: %14"PRIu64 159353324971SDavid Marchand " RX-dropped:%14"PRIu64 159453324971SDavid Marchand " RX-total:%14"PRIu64"\n", 159553324971SDavid Marchand stats.ipackets, 
stats.imissed, 159653324971SDavid Marchand stats.ipackets + stats.imissed); 159753324971SDavid Marchand 159853324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 159953324971SDavid Marchand printf(" Bad-ipcsum:%14"PRIu64 160053324971SDavid Marchand " Bad-l4csum:%14"PRIu64 160153324971SDavid Marchand " Bad-outer-l4csum: %-14"PRIu64"\n", 160253324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 160353324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 160453324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 160553324971SDavid Marchand if ((stats.ierrors + stats.rx_nombuf) > 0) { 160653324971SDavid Marchand printf(" RX-error:%"PRIu64"\n", stats.ierrors); 160753324971SDavid Marchand printf(" RX-nombufs: %14"PRIu64"\n", 160853324971SDavid Marchand stats.rx_nombuf); 160953324971SDavid Marchand } 161053324971SDavid Marchand 161153324971SDavid Marchand printf(" TX-packets: %14"PRIu64 161253324971SDavid Marchand " TX-dropped:%14"PRIu64 161353324971SDavid Marchand " TX-total:%14"PRIu64"\n", 161453324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 161553324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 161653324971SDavid Marchand } 161753324971SDavid Marchand 161853324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 161953324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 162053324971SDavid Marchand pkt_burst_stats_display("RX", 162153324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 162253324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 162353324971SDavid Marchand pkt_burst_stats_display("TX", 162453324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 162553324971SDavid Marchand #endif 162653324971SDavid Marchand 162753324971SDavid Marchand if (port->rx_queue_stats_mapping_enabled) { 162853324971SDavid Marchand printf("\n"); 162953324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 163053324971SDavid Marchand printf(" Stats reg 
%2d RX-packets:%14"PRIu64 163153324971SDavid Marchand " RX-errors:%14"PRIu64 163253324971SDavid Marchand " RX-bytes:%14"PRIu64"\n", 163353324971SDavid Marchand j, stats.q_ipackets[j], 163453324971SDavid Marchand stats.q_errors[j], stats.q_ibytes[j]); 163553324971SDavid Marchand } 163653324971SDavid Marchand printf("\n"); 163753324971SDavid Marchand } 163853324971SDavid Marchand if (port->tx_queue_stats_mapping_enabled) { 163953324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 164053324971SDavid Marchand printf(" Stats reg %2d TX-packets:%14"PRIu64 164153324971SDavid Marchand " TX-bytes:%14" 164253324971SDavid Marchand PRIu64"\n", 164353324971SDavid Marchand j, stats.q_opackets[j], 164453324971SDavid Marchand stats.q_obytes[j]); 164553324971SDavid Marchand } 164653324971SDavid Marchand } 164753324971SDavid Marchand 164853324971SDavid Marchand printf(" %s--------------------------------%s\n", 164953324971SDavid Marchand fwd_stats_border, fwd_stats_border); 165053324971SDavid Marchand } 165153324971SDavid Marchand 165253324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 165353324971SDavid Marchand "%s\n", 165453324971SDavid Marchand acc_stats_border, acc_stats_border); 165553324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 165653324971SDavid Marchand "%-"PRIu64"\n" 165753324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 165853324971SDavid Marchand "%-"PRIu64"\n", 165953324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 166053324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 166153324971SDavid Marchand if (total_rx_nombuf > 0) 166253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 166353324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 166453324971SDavid Marchand "%s\n", 166553324971SDavid Marchand 
acc_stats_border, acc_stats_border); 166653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 166753324971SDavid Marchand if (total_recv > 0) 166853324971SDavid Marchand printf("\n CPU cycles/packet=%u (total cycles=" 166953324971SDavid Marchand "%"PRIu64" / total RX packets=%"PRIu64")\n", 167053324971SDavid Marchand (unsigned int)(fwd_cycles / total_recv), 167153324971SDavid Marchand fwd_cycles, total_recv); 167253324971SDavid Marchand #endif 167353324971SDavid Marchand } 167453324971SDavid Marchand 167553324971SDavid Marchand void 167653324971SDavid Marchand fwd_stats_reset(void) 167753324971SDavid Marchand { 167853324971SDavid Marchand streamid_t sm_id; 167953324971SDavid Marchand portid_t pt_id; 168053324971SDavid Marchand int i; 168153324971SDavid Marchand 168253324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 168353324971SDavid Marchand pt_id = fwd_ports_ids[i]; 168453324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 168553324971SDavid Marchand } 168653324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 168753324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 168853324971SDavid Marchand 168953324971SDavid Marchand fs->rx_packets = 0; 169053324971SDavid Marchand fs->tx_packets = 0; 169153324971SDavid Marchand fs->fwd_dropped = 0; 169253324971SDavid Marchand fs->rx_bad_ip_csum = 0; 169353324971SDavid Marchand fs->rx_bad_l4_csum = 0; 169453324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 169553324971SDavid Marchand 169653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 169753324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 169853324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 169953324971SDavid Marchand #endif 170053324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 170153324971SDavid Marchand fs->core_cycles = 0; 170253324971SDavid Marchand #endif 170353324971SDavid 
Marchand } 170453324971SDavid Marchand } 170553324971SDavid Marchand 1706af75078fSIntel static void 17077741e4cfSIntel flush_fwd_rx_queues(void) 1708af75078fSIntel { 1709af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1710af75078fSIntel portid_t rxp; 17117741e4cfSIntel portid_t port_id; 1712af75078fSIntel queueid_t rxq; 1713af75078fSIntel uint16_t nb_rx; 1714af75078fSIntel uint16_t i; 1715af75078fSIntel uint8_t j; 1716f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1717594302c7SJames Poole uint64_t timer_period; 1718f487715fSReshma Pattan 1719f487715fSReshma Pattan /* convert to number of cycles */ 1720594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1721af75078fSIntel 1722af75078fSIntel for (j = 0; j < 2; j++) { 17237741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1724af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 17257741e4cfSIntel port_id = fwd_ports_ids[rxp]; 1726f487715fSReshma Pattan /** 1727f487715fSReshma Pattan * testpmd can stuck in the below do while loop 1728f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero 1729f487715fSReshma Pattan * packets. So timer is added to exit this loop 1730f487715fSReshma Pattan * after 1sec timer expiry. 
1731f487715fSReshma Pattan */ 1732f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 1733af75078fSIntel do { 17347741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 1735013af9b6SIntel pkts_burst, MAX_PKT_BURST); 1736af75078fSIntel for (i = 0; i < nb_rx; i++) 1737af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 1738f487715fSReshma Pattan 1739f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 1740f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 1741f487715fSReshma Pattan timer_tsc += diff_tsc; 1742f487715fSReshma Pattan } while ((nb_rx > 0) && 1743f487715fSReshma Pattan (timer_tsc < timer_period)); 1744f487715fSReshma Pattan timer_tsc = 0; 1745af75078fSIntel } 1746af75078fSIntel } 1747af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1748af75078fSIntel } 1749af75078fSIntel } 1750af75078fSIntel 1751af75078fSIntel static void 1752af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1753af75078fSIntel { 1754af75078fSIntel struct fwd_stream **fsm; 1755af75078fSIntel streamid_t nb_fs; 1756af75078fSIntel streamid_t sm_id; 17577e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 17587e4441c8SRemy Horton uint64_t tics_per_1sec; 17597e4441c8SRemy Horton uint64_t tics_datum; 17607e4441c8SRemy Horton uint64_t tics_current; 17614918a357SXiaoyun Li uint16_t i, cnt_ports; 1762af75078fSIntel 17634918a357SXiaoyun Li cnt_ports = nb_ports; 17647e4441c8SRemy Horton tics_datum = rte_rdtsc(); 17657e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 17667e4441c8SRemy Horton #endif 1767af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 1768af75078fSIntel nb_fs = fc->stream_nb; 1769af75078fSIntel do { 1770af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 1771af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 17727e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 1773e25e6c70SRemy Horton if (bitrate_enabled != 0 && 1774e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 17757e4441c8SRemy Horton tics_current = rte_rdtsc(); 17767e4441c8SRemy Horton 
if (tics_current - tics_datum >= tics_per_1sec) { 17777e4441c8SRemy Horton /* Periodic bitrate calculation */ 17784918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 1779e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 17804918a357SXiaoyun Li ports_ids[i]); 17817e4441c8SRemy Horton tics_datum = tics_current; 17827e4441c8SRemy Horton } 1783e25e6c70SRemy Horton } 17847e4441c8SRemy Horton #endif 178562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 178665eb1e54SPablo de Lara if (latencystats_enabled != 0 && 178765eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 178862d3216dSReshma Pattan rte_latencystats_update(); 178962d3216dSReshma Pattan #endif 179062d3216dSReshma Pattan 1791af75078fSIntel } while (! fc->stopped); 1792af75078fSIntel } 1793af75078fSIntel 1794af75078fSIntel static int 1795af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 1796af75078fSIntel { 1797af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1798af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 1799af75078fSIntel return 0; 1800af75078fSIntel } 1801af75078fSIntel 1802af75078fSIntel /* 1803af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 1804af75078fSIntel * Used to start communication flows in network loopback test configurations. 1805af75078fSIntel */ 1806af75078fSIntel static int 1807af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 1808af75078fSIntel { 1809af75078fSIntel struct fwd_lcore *fwd_lc; 1810af75078fSIntel struct fwd_lcore tmp_lcore; 1811af75078fSIntel 1812af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 1813af75078fSIntel tmp_lcore = *fwd_lc; 1814af75078fSIntel tmp_lcore.stopped = 1; 1815af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1816af75078fSIntel return 0; 1817af75078fSIntel } 1818af75078fSIntel 1819af75078fSIntel /* 1820af75078fSIntel * Launch packet forwarding: 1821af75078fSIntel * - Setup per-port forwarding context. 
1822af75078fSIntel * - launch logical cores with their forwarding configuration. 1823af75078fSIntel */ 1824af75078fSIntel static void 1825af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1826af75078fSIntel { 1827af75078fSIntel port_fwd_begin_t port_fwd_begin; 1828af75078fSIntel unsigned int i; 1829af75078fSIntel unsigned int lc_id; 1830af75078fSIntel int diag; 1831af75078fSIntel 1832af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1833af75078fSIntel if (port_fwd_begin != NULL) { 1834af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1835af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1836af75078fSIntel } 1837af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1838af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1839af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1840af75078fSIntel fwd_lcores[i]->stopped = 0; 1841af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1842af75078fSIntel fwd_lcores[i], lc_id); 1843af75078fSIntel if (diag != 0) 1844af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1845af75078fSIntel lc_id, diag); 1846af75078fSIntel } 1847af75078fSIntel } 1848af75078fSIntel } 1849af75078fSIntel 1850af75078fSIntel /* 1851af75078fSIntel * Launch packet forwarding configuration. 
1852af75078fSIntel */ 1853af75078fSIntel void 1854af75078fSIntel start_packet_forwarding(int with_tx_first) 1855af75078fSIntel { 1856af75078fSIntel port_fwd_begin_t port_fwd_begin; 1857af75078fSIntel port_fwd_end_t port_fwd_end; 1858af75078fSIntel struct rte_port *port; 1859af75078fSIntel unsigned int i; 1860af75078fSIntel portid_t pt_id; 1861af75078fSIntel 18625a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 18635a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 18645a8fb55cSReshma Pattan 18655a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 18665a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 18675a8fb55cSReshma Pattan 18685a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 18695a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 18705a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 18715a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 18725a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 18735a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 18745a8fb55cSReshma Pattan 1875ce8d5614SIntel if (all_ports_started() == 0) { 1876ce8d5614SIntel printf("Not all ports were started\n"); 1877ce8d5614SIntel return; 1878ce8d5614SIntel } 1879af75078fSIntel if (test_done == 0) { 1880af75078fSIntel printf("Packet forwarding already started\n"); 1881af75078fSIntel return; 1882af75078fSIntel } 1883edf87b4aSBernard Iremonger 1884edf87b4aSBernard Iremonger 18857741e4cfSIntel if(dcb_test) { 18867741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) { 18877741e4cfSIntel pt_id = fwd_ports_ids[i]; 18887741e4cfSIntel port = &ports[pt_id]; 18897741e4cfSIntel if (!port->dcb_flag) { 18907741e4cfSIntel printf("In DCB mode, all forwarding ports must " 18917741e4cfSIntel "be configured in this mode.\n"); 1892013af9b6SIntel return; 1893013af9b6SIntel } 18947741e4cfSIntel } 
18957741e4cfSIntel if (nb_fwd_lcores == 1) { 18967741e4cfSIntel printf("In DCB mode,the nb forwarding cores " 18977741e4cfSIntel "should be larger than 1.\n"); 18987741e4cfSIntel return; 18997741e4cfSIntel } 19007741e4cfSIntel } 1901af75078fSIntel test_done = 0; 19027741e4cfSIntel 190347a767b2SMatan Azrad fwd_config_setup(); 190447a767b2SMatan Azrad 19057741e4cfSIntel if(!no_flush_rx) 19067741e4cfSIntel flush_fwd_rx_queues(); 19077741e4cfSIntel 1908933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config); 1909af75078fSIntel rxtx_config_display(); 1910af75078fSIntel 191153324971SDavid Marchand fwd_stats_reset(); 1912af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1913af75078fSIntel pt_id = fwd_ports_ids[i]; 1914af75078fSIntel port = &ports[pt_id]; 1915013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port); 1916af75078fSIntel } 1917af75078fSIntel if (with_tx_first) { 1918af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1919af75078fSIntel if (port_fwd_begin != NULL) { 1920af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1921af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1922af75078fSIntel } 1923acbf77a6SZhihong Wang while (with_tx_first--) { 1924acbf77a6SZhihong Wang launch_packet_forwarding( 1925acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 1926af75078fSIntel rte_eal_mp_wait_lcore(); 1927acbf77a6SZhihong Wang } 1928af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1929af75078fSIntel if (port_fwd_end != NULL) { 1930af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1931af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1932af75078fSIntel } 1933af75078fSIntel } 1934af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1935af75078fSIntel } 1936af75078fSIntel 1937af75078fSIntel void 1938af75078fSIntel stop_packet_forwarding(void) 1939af75078fSIntel { 1940af75078fSIntel port_fwd_end_t port_fwd_end; 1941af75078fSIntel lcoreid_t lc_id; 194253324971SDavid 
Marchand portid_t pt_id; 194353324971SDavid Marchand int i; 1944af75078fSIntel 1945af75078fSIntel if (test_done) { 1946af75078fSIntel printf("Packet forwarding not started\n"); 1947af75078fSIntel return; 1948af75078fSIntel } 1949af75078fSIntel printf("Telling cores to stop..."); 1950af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1951af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1952af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1953af75078fSIntel rte_eal_mp_wait_lcore(); 1954af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1955af75078fSIntel if (port_fwd_end != NULL) { 1956af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1957af75078fSIntel pt_id = fwd_ports_ids[i]; 1958af75078fSIntel (*port_fwd_end)(pt_id); 1959af75078fSIntel } 1960af75078fSIntel } 1961c185d42cSDavid Marchand 196253324971SDavid Marchand fwd_stats_display(); 196358d475b7SJerin Jacob 1964af75078fSIntel printf("\nDone.\n"); 1965af75078fSIntel test_done = 1; 1966af75078fSIntel } 1967af75078fSIntel 1968cfae07fdSOuyang Changchun void 1969cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1970cfae07fdSOuyang Changchun { 1971492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1972cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1973cfae07fdSOuyang Changchun } 1974cfae07fdSOuyang Changchun 1975cfae07fdSOuyang Changchun void 1976cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1977cfae07fdSOuyang Changchun { 1978492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1979cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1980cfae07fdSOuyang Changchun } 1981cfae07fdSOuyang Changchun 1982ce8d5614SIntel static int 1983ce8d5614SIntel all_ports_started(void) 1984ce8d5614SIntel { 1985ce8d5614SIntel portid_t pi; 1986ce8d5614SIntel struct rte_port *port; 1987ce8d5614SIntel 19887d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1989ce8d5614SIntel port = &ports[pi]; 1990ce8d5614SIntel /* 
Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

/*
 * Return 1 if the given port is in the STOPPED state, 0 otherwise.
 * Bonding slave ports (slave_flag set) are reported as stopped regardless
 * of their actual status, mirroring the slave exemption used elsewhere.
 */
int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

/* Return 1 when every valid ethdev port reports stopped, 0 otherwise. */
int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

/*
 * Return 1 if the port id is valid and its status is STARTED, 0 otherwise.
 * Invalid ids are reported (ENABLED_WARN) and treated as not started.
 */
int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

/*
 * Configure and start port(s).
 *
 * @param pid  a specific port id, or RTE_PORT_ALL for every port.
 * @return 0 on success (or nothing to do), -1 on a configuration/queue
 *         setup failure (in which case the reconfig flag is re-armed so
 *         the next start attempt retries the configuration).
 *
 * Each port is moved STOPPED -> HANDLING -> STARTED via atomic cmpset so
 * that concurrent state changes are detected; ports not in STOPPED state
 * are skipped with a message.  Device (re)configuration and per-queue
 * setup are only redone when the need_reconfig/need_reconfig_queues flags
 * are set.  Link status is checked at the end unless disabled.
 */
int
start_port(portid_t pid)
{
	/* -1: no port matched; 0: matched but none started; 1: started */
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* claim the port: only a STOPPED port may be started */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* honour a per-port Tx ring NUMA override */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* mempool must exist on the Rx NUMA node */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* report the port MAC so the user can identify it */
		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

/*
 * Stop port(s).
 *
 * @param pid  a specific port id, or RTE_PORT_ALL for every port.
 *
 * Ports still used for forwarding (while a test is running) or acting as
 * bonding slaves are skipped with a message.  State moves
 * STARTED -> HANDLING -> STOPPED via atomic cmpset.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* leaving DCB test mode clears both test and config flags */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		/* refuse while the port is in the active forwarding config */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		/* bonding slaves must be detached from the bond first */
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* only a STARTED port can be stopped; otherwise skip silently */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

/*
 * Compact @array in place, dropping entries whose port id is no longer
 * valid, and write the new element count back through @total.
 */
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

/*
 * Drop invalid ids from both the global port list and the forwarding
 * port list, then resync the configured-port count with the forwarding
 * count.  Called after a port is closed or its device detached.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}

/*
 * Close port(s).
 *
 * @param pid  a specific port id, or RTE_PORT_ALL for every port.
 *
 * Ports still forwarding or acting as bonding slaves are skipped.
 * Flow rules are flushed before rte_eth_dev_close().  State moves
 * STOPPED -> HANDLING -> CLOSED; an already-CLOSED port is reported
 * and skipped.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) == 1 means it is already closed */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

/*
 * Reset port(s) via rte_eth_dev_reset().
 *
 * @param pid  a specific port id, or RTE_PORT_ALL for every port.
 *
 * On success the port is flagged for full reconfiguration (device and
 * queues) on the next start.  Forwarding ports and bonding slaves are
 * skipped.
 */
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			/* force device + queue reconfiguration on next start */
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

/*
 * Probe and attach a new device given its devargs @identifier.
 *
 * Two attach modes: when setup_on_probe_event is set, new ports were
 * flagged by the RTE_ETH_EVENT_NEW handler (need_setup) and are set up
 * here; otherwise the ports matching @identifier are iterated directly.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) != 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}

/*
 * Finish bringing up a freshly attached port: reconfigure it on its NUMA
 * socket, enable promiscuous mode, register it in the port/forwarding
 * lists and mark it STOPPED (ready to be started).
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

/*
 * Detach the underlying rte_device of @port_id via rte_dev_remove().
 *
 * The port must be stopped (closing it first is recommended; flow rules
 * are flushed if it was merely stopped).  All sibling ports of the same
 * device are forced to CLOSED and unmapped, then the global port lists
 * are compacted.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}

/*
 * Application exit path: stop forwarding, DMA-unmap anonymous mempools,
 * stop and close every port, tear down hot-plug monitoring, and free the
 * per-socket mempools.
 */
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;
	int i;

	if (test_done == 0)
		stop_packet_forwarding();

	/* unmap DMA for anonymous-memory mempools before closing ports */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires to
call clean-up routine to remove existing
			 * socket.
			 * This workaround valid only for testpmd, needs a fix
			 * valid for all applications.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port_device(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	/* release the per-socket mbuf pools */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}

/* A named command bound to its handler, for the simple test menu. */
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	/* poll every CHECK_INTERVAL ms until all links are up or timeout */
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. speed %u Mbps- %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex\n"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		/* with LSC interrupts enabled, one pass is enough */
		if (lsc_interrupt)
			break;
	}
}

/*
 * This callback is for remove a port for a device. It has limitation because
 * it is not for multiple port removal for a device.
 * TODO: the device detach invoke will plan to be removed from user side to
 * eal. And convert all PMDs to free port resources on ether device closing.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	/* pause forwarding while the port is being torn down */
	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* the device is going away: skip the link-status poll during stop */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		/* event printing is opt-in per event type via the mask */
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		/* defer setup: attach_port() picks up ports flagged here */
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* removal must run outside the interrupt context: defer 100ms */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Register eth_event_callback() for every ethdev event type on all ports.
 * Returns 0 on success, -1 if any registration fails.
 */
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}

/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
	     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}

/*
 * Apply the user-supplied Tx queue -> stats-counter mappings for @port_id.
 * Returns 0 on success (setting tx_queue_stats_mapping_enabled when at
 * least one mapping applied) or the ethdev error code on failure.
 */
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

/*
 * Apply the user-supplied Rx queue -> stats-counter mappings for @port_id.
 * Mirror of the Tx variant above.
 */
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

/*
 * Program both Tx and Rx queue-stats mappings for port @pi.
 * -ENOTSUP from the driver is tolerated (the feature is just disabled);
 * any other error aborts testpmd via rte_exit().
 */
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

/*
 * Seed each queue's config from the driver defaults, re-apply the
 * previously selected offloads, then override individual thresholds
 * with any command-line values (RTE_PMD_PARAM_UNSET means "keep default").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		/* preserve configured offloads across the defaults reset */
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		port->rx_conf[qid].offloads |= offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		port->tx_conf[qid].offloads |= offloads;

		/* Check if any Tx parameters have been passed */
de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2867d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2868f2c5125aSPablo de Lara 2869f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2870d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2871f2c5125aSPablo de Lara 2872f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2873d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2874f2c5125aSPablo de Lara 2875f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2876d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2877f2c5125aSPablo de Lara 2878f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2879d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2880d44f8a48SQi Zhang 2881d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 2882d44f8a48SQi Zhang } 2883f2c5125aSPablo de Lara } 2884f2c5125aSPablo de Lara 2885013af9b6SIntel void 2886013af9b6SIntel init_port_config(void) 2887013af9b6SIntel { 2888013af9b6SIntel portid_t pid; 2889013af9b6SIntel struct rte_port *port; 2890013af9b6SIntel 28917d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2892013af9b6SIntel port = &ports[pid]; 2893013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 2894422515b9SAdrien Mazarguil rte_eth_dev_info_get(pid, &port->dev_info); 28953ce690d3SBruce Richardson if (nb_rxq > 1) { 2896013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 289790892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 2898422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 2899af75078fSIntel } else { 2900013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2901013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2902af75078fSIntel } 29033ce690d3SBruce Richardson 29045f592039SJingjing Wu if (port->dcb_flag == 0) { 29053ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 29063ce690d3SBruce Richardson 
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 29073ce690d3SBruce Richardson else 29083ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 29093ce690d3SBruce Richardson } 29103ce690d3SBruce Richardson 2911f2c5125aSPablo de Lara rxtx_port_config(port); 2912013af9b6SIntel 2913013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2914013af9b6SIntel 2915013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 291650c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2917e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 29187b7e5ba7SIntel #endif 29198ea656f8SGaetan Rivet 29208ea656f8SGaetan Rivet if (lsc_interrupt && 29218ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 29228ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 29238ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2924284c908cSGaetan Rivet if (rmv_interrupt && 2925284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2926284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2927284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 2928013af9b6SIntel } 2929013af9b6SIntel } 2930013af9b6SIntel 293141b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 293241b05095SBernard Iremonger { 293341b05095SBernard Iremonger struct rte_port *port; 293441b05095SBernard Iremonger 293541b05095SBernard Iremonger port = &ports[slave_pid]; 293641b05095SBernard Iremonger port->slave_flag = 1; 293741b05095SBernard Iremonger } 293841b05095SBernard Iremonger 293941b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 294041b05095SBernard Iremonger { 294141b05095SBernard Iremonger struct rte_port *port; 294241b05095SBernard Iremonger 294341b05095SBernard Iremonger port = &ports[slave_pid]; 294441b05095SBernard Iremonger port->slave_flag = 0; 294541b05095SBernard Iremonger } 294641b05095SBernard Iremonger 29470e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 29480e545d30SBernard 
Iremonger { 29490e545d30SBernard Iremonger struct rte_port *port; 29500e545d30SBernard Iremonger 29510e545d30SBernard Iremonger port = &ports[slave_pid]; 2952b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 2953b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2954b8b8b344SMatan Azrad return 1; 2955b8b8b344SMatan Azrad return 0; 29560e545d30SBernard Iremonger } 29570e545d30SBernard Iremonger 2958013af9b6SIntel const uint16_t vlan_tags[] = { 2959013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 2960013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 2961013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 2962013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 2963013af9b6SIntel }; 2964013af9b6SIntel 2965013af9b6SIntel static int 2966ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 29671a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 29681a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 29691a572499SJingjing Wu uint8_t pfc_en) 2970013af9b6SIntel { 2971013af9b6SIntel uint8_t i; 2972ac7c491cSKonstantin Ananyev int32_t rc; 2973ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 2974af75078fSIntel 2975af75078fSIntel /* 2976013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 2977013af9b6SIntel * given above, and the number of traffic classes available for use. 
2978af75078fSIntel */ 29791a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 29801a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 29811a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 29821a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 29831a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2984013af9b6SIntel 2985547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 29861a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 29871a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 29881a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 29891a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 29901a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 29911a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 2992013af9b6SIntel 29931a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 29941a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 29951a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 29961a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 29971a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 2998af75078fSIntel } 2999013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3000f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 3001f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 3002013af9b6SIntel } 3003013af9b6SIntel 3004013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 300532e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 300632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 30071a572499SJingjing Wu } else { 30081a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 30091a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 30101a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 30111a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 3012013af9b6SIntel 3013ac7c491cSKonstantin Ananyev rc = 
rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 3014ac7c491cSKonstantin Ananyev if (rc != 0) 3015ac7c491cSKonstantin Ananyev return rc; 3016ac7c491cSKonstantin Ananyev 30171a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 30181a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 30191a572499SJingjing Wu 3020bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3021bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 3022bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 3023013af9b6SIntel } 3024ac7c491cSKonstantin Ananyev 30251a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 3026ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 302732e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 30281a572499SJingjing Wu } 30291a572499SJingjing Wu 30301a572499SJingjing Wu if (pfc_en) 30311a572499SJingjing Wu eth_conf->dcb_capability_en = 30321a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 3033013af9b6SIntel else 3034013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 3035013af9b6SIntel 3036013af9b6SIntel return 0; 3037013af9b6SIntel } 3038013af9b6SIntel 3039013af9b6SIntel int 30401a572499SJingjing Wu init_port_dcb_config(portid_t pid, 30411a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 30421a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 30431a572499SJingjing Wu uint8_t pfc_en) 3044013af9b6SIntel { 3045013af9b6SIntel struct rte_eth_conf port_conf; 3046013af9b6SIntel struct rte_port *rte_port; 3047013af9b6SIntel int retval; 3048013af9b6SIntel uint16_t i; 3049013af9b6SIntel 30502a977b89SWenzhuo Lu rte_port = &ports[pid]; 3051013af9b6SIntel 3052013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 3053013af9b6SIntel /* Enter DCB configuration status */ 3054013af9b6SIntel dcb_config = 1; 3055013af9b6SIntel 3056d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 3057d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 3058d5354e89SYanglong Wu 
3059013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 3060ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 3061013af9b6SIntel if (retval < 0) 3062013af9b6SIntel return retval; 30630074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3064013af9b6SIntel 30652f203d44SQi Zhang /* re-configure the device . */ 30662b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 30672b0e0ebaSChenbo Xia if (retval < 0) 30682b0e0ebaSChenbo Xia return retval; 30692a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 30702a977b89SWenzhuo Lu 30712a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 30722a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 30732a977b89SWenzhuo Lu */ 30742a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 30752a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 30762a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 30772a977b89SWenzhuo Lu " for port %d.", pid); 30782a977b89SWenzhuo Lu return -1; 30792a977b89SWenzhuo Lu } 30802a977b89SWenzhuo Lu 30812a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 30822a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 30832a977b89SWenzhuo Lu */ 30842a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 308586ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 308686ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 308786ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 308886ef65eeSBernard Iremonger } else { 30892a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 30902a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 309186ef65eeSBernard Iremonger } 30922a977b89SWenzhuo Lu } else { 30932a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 30942a977b89SWenzhuo Lu if 
(rte_port->dev_info.vmdq_pool_base == 0) { 30952a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 30962a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 30972a977b89SWenzhuo Lu } else { 30982a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 30992a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 31002a977b89SWenzhuo Lu 31012a977b89SWenzhuo Lu } 31022a977b89SWenzhuo Lu } 31032a977b89SWenzhuo Lu rx_free_thresh = 64; 31042a977b89SWenzhuo Lu 3105013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 3106013af9b6SIntel 3107f2c5125aSPablo de Lara rxtx_port_config(rte_port); 3108013af9b6SIntel /* VLAN filter */ 31090074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 31101a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 3111013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 3112013af9b6SIntel 3113013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 3114013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 3115013af9b6SIntel 31167741e4cfSIntel rte_port->dcb_flag = 1; 31177741e4cfSIntel 3118013af9b6SIntel return 0; 3119af75078fSIntel } 3120af75078fSIntel 3121ffc468ffSTetsuya Mukawa static void 3122ffc468ffSTetsuya Mukawa init_port(void) 3123ffc468ffSTetsuya Mukawa { 3124ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
*/ 3125ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 3126ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 3127ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 3128ffc468ffSTetsuya Mukawa if (ports == NULL) { 3129ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 3130ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 3131ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 3132ffc468ffSTetsuya Mukawa } 313329841336SPhil Yang 313429841336SPhil Yang /* Initialize ports NUMA structures */ 313529841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 313629841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 313729841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 3138ffc468ffSTetsuya Mukawa } 3139ffc468ffSTetsuya Mukawa 3140d3a274ceSZhihong Wang static void 3141d3a274ceSZhihong Wang force_quit(void) 3142d3a274ceSZhihong Wang { 3143d3a274ceSZhihong Wang pmd_test_exit(); 3144d3a274ceSZhihong Wang prompt_exit(); 3145d3a274ceSZhihong Wang } 3146d3a274ceSZhihong Wang 3147d3a274ceSZhihong Wang static void 3148cfea1f30SPablo de Lara print_stats(void) 3149cfea1f30SPablo de Lara { 3150cfea1f30SPablo de Lara uint8_t i; 3151cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 3152cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 3153cfea1f30SPablo de Lara 3154cfea1f30SPablo de Lara /* Clear screen and move to top left */ 3155cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 3156cfea1f30SPablo de Lara 3157cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 3158cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 3159cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 3160683d1e82SIgor Romanov 3161683d1e82SIgor Romanov fflush(stdout); 3162cfea1f30SPablo de Lara } 3163cfea1f30SPablo de Lara 3164cfea1f30SPablo de Lara static void 3165d3a274ceSZhihong Wang signal_handler(int 
signum) 3166d3a274ceSZhihong Wang { 3167d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 3168d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 3169d3a274ceSZhihong Wang signum); 3170102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 3171102b7329SReshma Pattan /* uninitialize packet capture framework */ 3172102b7329SReshma Pattan rte_pdump_uninit(); 3173102b7329SReshma Pattan #endif 317462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 317562d3216dSReshma Pattan rte_latencystats_uninit(); 317662d3216dSReshma Pattan #endif 3177d3a274ceSZhihong Wang force_quit(); 3178d9a191a0SPhil Yang /* Set flag to indicate the force termination. */ 3179d9a191a0SPhil Yang f_quit = 1; 3180d3a274ceSZhihong Wang /* exit with the expected status */ 3181d3a274ceSZhihong Wang signal(signum, SIG_DFL); 3182d3a274ceSZhihong Wang kill(getpid(), signum); 3183d3a274ceSZhihong Wang } 3184d3a274ceSZhihong Wang } 3185d3a274ceSZhihong Wang 3186af75078fSIntel int 3187af75078fSIntel main(int argc, char** argv) 3188af75078fSIntel { 3189af75078fSIntel int diag; 3190f8244c63SZhiyong Yang portid_t port_id; 31914918a357SXiaoyun Li uint16_t count; 3192fb73e096SJeff Guo int ret; 3193af75078fSIntel 3194d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 3195d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 3196d3a274ceSZhihong Wang 3197af75078fSIntel diag = rte_eal_init(argc, argv); 3198af75078fSIntel if (diag < 0) 3199af75078fSIntel rte_panic("Cannot init EAL\n"); 3200af75078fSIntel 3201285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 3202285fd101SOlivier Matz if (testpmd_logtype < 0) 3203285fd101SOlivier Matz rte_panic("Cannot register log type"); 3204285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 3205285fd101SOlivier Matz 320697b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 320797b5d8b5SThomas Monjalon if (ret != 0) 320897b5d8b5SThomas Monjalon rte_panic("Cannot register for ethdev events"); 
320997b5d8b5SThomas Monjalon 32104aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP 32114aa0d012SAnatoly Burakov /* initialize packet capture framework */ 3212e9436f54STiwei Bie rte_pdump_init(); 32134aa0d012SAnatoly Burakov #endif 32144aa0d012SAnatoly Burakov 32154918a357SXiaoyun Li count = 0; 32164918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 32174918a357SXiaoyun Li ports_ids[count] = port_id; 32184918a357SXiaoyun Li count++; 32194918a357SXiaoyun Li } 32204918a357SXiaoyun Li nb_ports = (portid_t) count; 32214aa0d012SAnatoly Burakov if (nb_ports == 0) 32224aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 32234aa0d012SAnatoly Burakov 32244aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 32254aa0d012SAnatoly Burakov init_port(); 32264aa0d012SAnatoly Burakov 32274aa0d012SAnatoly Burakov set_def_fwd_config(); 32284aa0d012SAnatoly Burakov if (nb_lcores == 0) 32294aa0d012SAnatoly Burakov rte_panic("Empty set of forwarding logical cores - check the " 32304aa0d012SAnatoly Burakov "core mask supplied in the command parameters\n"); 32314aa0d012SAnatoly Burakov 3232e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 3233e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE 3234e505d84cSAnatoly Burakov bitrate_enabled = 0; 3235e505d84cSAnatoly Burakov #endif 3236e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS 3237e505d84cSAnatoly Burakov latencystats_enabled = 0; 3238e505d84cSAnatoly Burakov #endif 3239e505d84cSAnatoly Burakov 3240fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 32415fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 3242fb7b8b32SAnatoly Burakov do_mlockall = 0; 3243fb7b8b32SAnatoly Burakov #else 3244fb7b8b32SAnatoly Burakov do_mlockall = 1; 3245fb7b8b32SAnatoly Burakov #endif 3246fb7b8b32SAnatoly Burakov 3247e505d84cSAnatoly Burakov argc -= diag; 3248e505d84cSAnatoly Burakov argv += diag; 3249e505d84cSAnatoly Burakov if (argc > 1) 3250e505d84cSAnatoly 
Burakov launch_args_parse(argc, argv); 3251e505d84cSAnatoly Burakov 3252e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 3253285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 32541c036b16SEelco Chaudron strerror(errno)); 32551c036b16SEelco Chaudron } 32561c036b16SEelco Chaudron 325799cabef0SPablo de Lara if (tx_first && interactive) 325899cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 325999cabef0SPablo de Lara "interactive mode.\n"); 32608820cba4SDavid Hunt 32618820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 32628820cba4SDavid Hunt printf("Warning: lsc_interrupt needs to be off when " 32638820cba4SDavid Hunt " using tx_first. Disabling.\n"); 32648820cba4SDavid Hunt lsc_interrupt = 0; 32658820cba4SDavid Hunt } 32668820cba4SDavid Hunt 32675a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 32685a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 32695a8fb55cSReshma Pattan 32705a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 3271af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 3272af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 3273af75078fSIntel nb_rxq, nb_txq); 3274af75078fSIntel 3275af75078fSIntel init_config(); 3276fb73e096SJeff Guo 3277fb73e096SJeff Guo if (hot_plug) { 32782049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 3279fb73e096SJeff Guo if (ret) { 32802049c511SJeff Guo RTE_LOG(ERR, EAL, 32812049c511SJeff Guo "fail to enable hotplug handling."); 3282fb73e096SJeff Guo return -1; 3283fb73e096SJeff Guo } 3284fb73e096SJeff Guo 32852049c511SJeff Guo ret = rte_dev_event_monitor_start(); 32862049c511SJeff Guo if (ret) { 32872049c511SJeff Guo RTE_LOG(ERR, EAL, 32882049c511SJeff Guo "fail to start device event monitoring."); 32892049c511SJeff Guo return -1; 32902049c511SJeff Guo } 32912049c511SJeff Guo 32922049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 3293cc1bf307SJeff 
Guo dev_event_callback, NULL); 32942049c511SJeff Guo if (ret) { 32952049c511SJeff Guo RTE_LOG(ERR, EAL, 32962049c511SJeff Guo "fail to register device event callback\n"); 32972049c511SJeff Guo return -1; 32982049c511SJeff Guo } 3299fb73e096SJeff Guo } 3300fb73e096SJeff Guo 3301148f963fSBruce Richardson if (start_port(RTE_PORT_ALL) != 0) 3302148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 3303af75078fSIntel 3304ce8d5614SIntel /* set all ports to promiscuous mode by default */ 33057d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(port_id) 3306ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 3307af75078fSIntel 33087e4441c8SRemy Horton /* Init metrics library */ 33097e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 33107e4441c8SRemy Horton 331162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 331262d3216dSReshma Pattan if (latencystats_enabled != 0) { 331362d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 331462d3216dSReshma Pattan if (ret) 331562d3216dSReshma Pattan printf("Warning: latencystats init()" 331662d3216dSReshma Pattan " returned error %d\n", ret); 331762d3216dSReshma Pattan printf("Latencystats running on lcore %d\n", 331862d3216dSReshma Pattan latencystats_lcore_id); 331962d3216dSReshma Pattan } 332062d3216dSReshma Pattan #endif 332162d3216dSReshma Pattan 33227e4441c8SRemy Horton /* Setup bitrate stats */ 33237e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 3324e25e6c70SRemy Horton if (bitrate_enabled != 0) { 33257e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 33267e4441c8SRemy Horton if (bitrate_data == NULL) 3327e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 3328e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 33297e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 3330e25e6c70SRemy Horton } 33317e4441c8SRemy Horton #endif 33327e4441c8SRemy Horton 33330d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 333481ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 
333581ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 333681ef862bSAllain Legacy 3337ca7feb22SCyril Chemparathy if (interactive == 1) { 3338ca7feb22SCyril Chemparathy if (auto_start) { 3339ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 3340ca7feb22SCyril Chemparathy start_packet_forwarding(0); 3341ca7feb22SCyril Chemparathy } 3342af75078fSIntel prompt(); 33430de738cfSJiayu Hu pmd_test_exit(); 3344ca7feb22SCyril Chemparathy } else 33450d56cb81SThomas Monjalon #endif 33460d56cb81SThomas Monjalon { 3347af75078fSIntel char c; 3348af75078fSIntel int rc; 3349af75078fSIntel 3350d9a191a0SPhil Yang f_quit = 0; 3351d9a191a0SPhil Yang 3352af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 335399cabef0SPablo de Lara start_packet_forwarding(tx_first); 3354cfea1f30SPablo de Lara if (stats_period != 0) { 3355cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 3356cfea1f30SPablo de Lara uint64_t timer_period; 3357cfea1f30SPablo de Lara 3358cfea1f30SPablo de Lara /* Convert to number of cycles */ 3359cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 3360cfea1f30SPablo de Lara 3361d9a191a0SPhil Yang while (f_quit == 0) { 3362cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 3363cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 3364cfea1f30SPablo de Lara 3365cfea1f30SPablo de Lara if (diff_time >= timer_period) { 3366cfea1f30SPablo de Lara print_stats(); 3367cfea1f30SPablo de Lara /* Reset the timer */ 3368cfea1f30SPablo de Lara diff_time = 0; 3369cfea1f30SPablo de Lara } 3370cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 3371cfea1f30SPablo de Lara prev_time = cur_time; 3372cfea1f30SPablo de Lara sleep(1); 3373cfea1f30SPablo de Lara } 3374cfea1f30SPablo de Lara } 3375cfea1f30SPablo de Lara 3376af75078fSIntel printf("Press enter to exit\n"); 3377af75078fSIntel rc = read(0, &c, 1); 3378d3a274ceSZhihong Wang pmd_test_exit(); 3379af75078fSIntel 
if (rc < 0) 3380af75078fSIntel return 1; 3381af75078fSIntel } 3382af75078fSIntel 3383af75078fSIntel return 0; 3384af75078fSIntel } 3385