/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
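
/*
 * Worked example (illustrative, not used by the build): pagesz_flags()
 * below combines these macros into mmap() flags requesting an explicit
 * hugepage size. For a 1 GiB page, rte_log2_u64(1 << 30) == 30, so the
 * request becomes:
 *
 *	flags = MAP_ANONYMOUS | MAP_PRIVATE | HUGE_FLAG | (30 << HUGE_SHIFT);
 *
 * which on Linux equals MAP_HUGETLB | MAP_HUGE_1GB as documented in mmap(2).
 */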

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
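
/*
 * For reference, mp_alloc_type is normally driven from the command line.
 * A sketch, assuming the option spelling documented for testpmd (see
 * parameters.c for the authoritative list of values):
 *
 *	dpdk-testpmd --mp-alloc=anon ...      => MP_ALLOC_ANON
 *	dpdk-testpmd --mp-alloc=xmemhuge ...  => MP_ALLOC_XMEM_HUGE
 */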

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
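
/*
 * For reference: an entry of the table above is selected at run time and
 * stored in cur_fwd_eng, e.g. via the interactive "set fwd <mode>" command
 * or the --forward-mode=<mode> command-line option (the exact spellings
 * are defined in the command-line handling, not here).
 */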

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated from the terminal. Set this flag to exit the statistics
 * period loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used to scatter received packets
 * if one of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1;  /**< Number of RX queues per port. */
queueid_t nb_txq = 1;  /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
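
/*
 * A minimal sketch of how the threshold knobs are consumed (illustrative;
 * field names are assumptions to be checked against the port setup code).
 * testpmd applies a value only when it was set on the command line,
 * otherwise the driver default from rte_eth_dev_info_get() is kept:
 *
 *	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
 *		port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
 *	// ... same pattern for the h/w thresholds and the TX side
 */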

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
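
/*
 * Example of adjusting the mask (illustrative): to stop printing flow
 * aging events, clear the corresponding bit:
 *
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
 *
 * Events whose bit is set are pretty-printed using eth_event_desc above.
 */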

/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	/* Default maximum frame length.
	 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
	 * in init_config().
	 */
	.max_rx_pkt_len = 0,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check whether a socket has already been discovered.
 * Returns zero if the socket id is already known, a positive value otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
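
/*
 * Worked example with illustrative numbers: if rte_mempool_calc_obj_size()
 * yields obj_sz = 2560 bytes and pgsz is 2 MiB, then mbuf_per_pg =
 * 2097152 / 2560 = 819. For nb_mbufs = 262144: n_pages = 262144 / 819 + 1
 * = 321, mbuf_mem = 321 * 2 MiB = 642 MiB, and the reported size is
 * RTE_ALIGN(128 MiB + 642 MiB, 2 MiB) = 770 MiB.
 */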

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
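
/*
 * Once setup_extmem() has succeeded, the external heap is consumed through
 * the regular allocator interfaces; mbuf_pool_create() below does exactly
 * this for the MP_ALLOC_XMEM* modes:
 *
 *	heap_socket = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
 *	rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
 *			mb_mempool_cache, 0, mbuf_seg_size, heap_socket);
 */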

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
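
/*
 * These two callbacks are meant to be driven by rte_mempool_mem_iter();
 * the MP_ALLOC_ANON path in mbuf_pool_create() below walks every memory
 * chunk of the freshly populated pool with dma_map_cb() so the anonymous
 * pages are registered with EAL and DMA-mapped for every port.
 * dma_unmap_cb() is the symmetric teardown step.
 */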

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
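
/*
 * Sizing example for setup_extbuf() (illustrative numbers): with
 * mbuf_sz = 2176 on a 64-byte cache line target, elt_size stays 2176 and
 * each 2 MiB zone holds elt_num = 2097152 / 2176 = 963 buffers, so
 * nb_mbufs = 262144 requires zone_num = (262144 + 962) / 963 = 273
 * memzones.
 */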

/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
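
/*
 * A sketch of the intended call pattern (hedged; see init_config(), which
 * creates one pool per configured mbuf size and NUMA socket - the index
 * arithmetic below is illustrative, not a quote of the real code):
 *
 *	mempools[socket_id * MAX_SEGS_BUFFER_SPLIT + size_idx] =
 *		mbuf_pool_create(mbuf_data_size[size_idx],
 *				 nb_mbuf_per_pool, socket_id, size_idx);
 */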

/*
 * Check if the given socket id is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check if the input rxq is valid.
 * If the input rxq is not greater than the maximum number of RX queues
 * of any port, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}
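
/*
 * check_nb_rxq()/check_nb_txq() (and the descriptor-count variants further
 * down) back the configuration paths; a typical use when handling the
 * --rxq option looks like this (sketch, not a quote of parameters.c):
 *
 *	if (check_nb_rxq(n) != 0)
 *		rte_exit(EXIT_FAILURE, "Invalid number of RX queues\n");
 *	nb_rxq = (queueid_t) n;
 */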

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check if the input txq is valid.
 * If the input txq is not greater than the maximum number of TX queues
 * of any port, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RXDs of every RX queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every RX queue.
 * *pid returns the port id which has the minimal value of
 * min_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check if the input rxd is valid.
 * If the input rxd is not greater than the maximum number of RXDs of
 * every RX queue and not less than the minimal number of RXDs of
 * every RX queue, it is valid.
 * If valid, return 0; else return -1.
 */
124699e040d3SLijun Ou  * if valid, return 0, else return -1
124799e040d3SLijun Ou  */
124899e040d3SLijun Ou int
124999e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
125099e040d3SLijun Ou {
125199e040d3SLijun Ou uint16_t allowed_max_rxd;
125299e040d3SLijun Ou uint16_t allowed_min_rxd;
125399e040d3SLijun Ou portid_t pid = 0;
125499e040d3SLijun Ou 
125599e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
125699e040d3SLijun Ou if (rxd > allowed_max_rxd) {
125799e040d3SLijun Ou printf("Fail: input rxd (%u) can't be greater "
125899e040d3SLijun Ou "than max_rxds (%u) of port %u\n",
125999e040d3SLijun Ou rxd,
126099e040d3SLijun Ou allowed_max_rxd,
126199e040d3SLijun Ou pid);
126299e040d3SLijun Ou return -1;
126399e040d3SLijun Ou }
126499e040d3SLijun Ou 
126599e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
126699e040d3SLijun Ou if (rxd < allowed_min_rxd) {
126799e040d3SLijun Ou printf("Fail: input rxd (%u) can't be less "
126899e040d3SLijun Ou "than min_rxds (%u) of port %u\n",
126999e040d3SLijun Ou rxd,
127099e040d3SLijun Ou allowed_min_rxd,
127199e040d3SLijun Ou pid);
127299e040d3SLijun Ou return -1;
127399e040d3SLijun Ou }
127499e040d3SLijun Ou 
127599e040d3SLijun Ou return 0;
127699e040d3SLijun Ou }
127799e040d3SLijun Ou 
127899e040d3SLijun Ou /*
127999e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every Tx queue.
128099e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
128199e040d3SLijun Ou  * max_txd among all ports.
128299e040d3SLijun Ou  */
128399e040d3SLijun Ou static uint16_t
128499e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
128599e040d3SLijun Ou {
128699e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX;
128799e040d3SLijun Ou portid_t pi;
128899e040d3SLijun Ou struct rte_eth_dev_info dev_info;
128999e040d3SLijun Ou 
129099e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
129199e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
129299e040d3SLijun Ou continue;
129399e040d3SLijun Ou 
129499e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
129599e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max;
129699e040d3SLijun Ou *pid = pi;
129799e040d3SLijun Ou }
129899e040d3SLijun Ou }
129999e040d3SLijun Ou return allowed_max_txd;
130099e040d3SLijun Ou }
130199e040d3SLijun Ou 
130299e040d3SLijun Ou /*
130399e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every Tx queue.
130499e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
130599e040d3SLijun Ou  * min_txd among all ports.
130699e040d3SLijun Ou  */
130799e040d3SLijun Ou static uint16_t
130899e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
130999e040d3SLijun Ou {
131099e040d3SLijun Ou uint16_t allowed_min_txd = 0;
131199e040d3SLijun Ou portid_t pi;
131299e040d3SLijun Ou struct rte_eth_dev_info dev_info;
131399e040d3SLijun Ou 
131499e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
131599e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
131699e040d3SLijun Ou continue;
131799e040d3SLijun Ou 
131899e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
131999e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min;
132099e040d3SLijun Ou *pid = pi;
132199e040d3SLijun Ou }
132299e040d3SLijun Ou }
132399e040d3SLijun Ou 
132499e040d3SLijun Ou return allowed_min_txd;
132599e040d3SLijun Ou }
132699e040d3SLijun Ou 
132799e040d3SLijun Ou /*
132899e040d3SLijun Ou  * Check whether the input txd is valid.
132999e040d3SLijun Ou  * If the input txd does not exceed the maximum number of TXDs and is
133099e040d3SLijun Ou  * not less than the minimal number of TXDs of every Tx queue, it is valid.
133199e040d3SLijun Ou  * if valid, return 0, else return -1
133299e040d3SLijun Ou  */
133399e040d3SLijun Ou int
133499e040d3SLijun Ou check_nb_txd(queueid_t txd)
133599e040d3SLijun Ou {
133699e040d3SLijun Ou uint16_t allowed_max_txd;
133799e040d3SLijun Ou uint16_t allowed_min_txd;
133899e040d3SLijun Ou portid_t pid = 0;
133999e040d3SLijun Ou 
134099e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid);
134199e040d3SLijun Ou if (txd > allowed_max_txd) {
134299e040d3SLijun Ou printf("Fail: input txd (%u) can't be greater "
134399e040d3SLijun Ou "than max_txds (%u) of port %u\n",
134499e040d3SLijun Ou txd,
134599e040d3SLijun Ou allowed_max_txd,
134699e040d3SLijun Ou pid);
134799e040d3SLijun Ou return -1;
134899e040d3SLijun Ou }
134999e040d3SLijun Ou 
135099e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid);
135199e040d3SLijun Ou if (txd < allowed_min_txd) {
135299e040d3SLijun Ou printf("Fail: input txd (%u) can't be less "
135399e040d3SLijun Ou "than min_txds (%u) of port %u\n",
135499e040d3SLijun Ou txd,
135599e040d3SLijun Ou allowed_min_txd,
135699e040d3SLijun Ou pid);
135799e040d3SLijun Ou return -1;
135899e040d3SLijun Ou }
135999e040d3SLijun Ou return 0;
136099e040d3SLijun Ou }
136199e040d3SLijun Ou 
136299e040d3SLijun Ou 
136399e040d3SLijun Ou /*
13641c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
13651c69df45SOri Kam  * *pid returns the port id which has the minimal value of
13661c69df45SOri Kam  * max_hairpin_queues in all ports.
13671c69df45SOri Kam  */
13681c69df45SOri Kam queueid_t
13691c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13701c69df45SOri Kam {
13719e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13721c69df45SOri Kam portid_t pi;
13731c69df45SOri Kam struct rte_eth_hairpin_cap cap;
13741c69df45SOri Kam 
13751c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) {
13761c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13771c69df45SOri Kam *pid = pi;
13781c69df45SOri Kam return 0;
13791c69df45SOri Kam }
13801c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) {
13811c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues;
13821c69df45SOri Kam *pid = pi;
13831c69df45SOri Kam }
13841c69df45SOri Kam }
13851c69df45SOri Kam return allowed_max_hairpinq;
13861c69df45SOri Kam }
13871c69df45SOri Kam 
13881c69df45SOri Kam /*
13891c69df45SOri Kam  * Check whether the input hairpinq is valid.
13901c69df45SOri Kam  * If the input hairpinq does not exceed the maximum number
13911c69df45SOri Kam  * of hairpin queues of any port, it is valid.
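 * (Illustrative: any probed port for which
 * rte_eth_dev_hairpin_capability_get() fails makes
 * get_allowed_max_nb_hairpinq() return 0, so every nonzero hairpinq
 * request is then rejected.)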
13921c69df45SOri Kam * if valid, return 0, else return -1 13931c69df45SOri Kam */ 13941c69df45SOri Kam int 13951c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 13961c69df45SOri Kam { 13971c69df45SOri Kam queueid_t allowed_max_hairpinq; 13981c69df45SOri Kam portid_t pid = 0; 13991c69df45SOri Kam 14001c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 14011c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 14021c69df45SOri Kam printf("Fail: input hairpin (%u) can't be greater " 14031c69df45SOri Kam "than max_hairpin_queues (%u) of port %u\n", 14041c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 14051c69df45SOri Kam return -1; 14061c69df45SOri Kam } 14071c69df45SOri Kam return 0; 14081c69df45SOri Kam } 14091c69df45SOri Kam 1410af75078fSIntel static void 1411af75078fSIntel init_config(void) 1412af75078fSIntel { 1413ce8d5614SIntel portid_t pid; 1414af75078fSIntel struct rte_port *port; 1415af75078fSIntel struct rte_mempool *mbp; 1416af75078fSIntel unsigned int nb_mbuf_per_pool; 1417af75078fSIntel lcoreid_t lc_id; 14187acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 1419b7091f1dSJiayu Hu struct rte_gro_param gro_param; 142052f38a20SJiayu Hu uint32_t gso_types; 142133f9630fSSunil Kumar Kori uint16_t data_size; 142233f9630fSSunil Kumar Kori bool warning = 0; 1423c73a9071SWei Dai int k; 14246f51deb9SIvan Ilchenko int ret; 1425af75078fSIntel 14267acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 1427487f9a59SYulong Pei 1428af75078fSIntel /* Configuration of logical cores. */ 1429af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1430af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1431fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1432af75078fSIntel if (fwd_lcores == NULL) { 1433ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1434ce8d5614SIntel "failed\n", nb_lcores); 1435af75078fSIntel } 1436af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1437af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1438af75078fSIntel sizeof(struct fwd_lcore), 1439fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1440af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1441ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1442ce8d5614SIntel "failed\n"); 1443af75078fSIntel } 1444af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1445af75078fSIntel } 1446af75078fSIntel 14477d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1448ce8d5614SIntel port = &ports[pid]; 14498b9bd0efSMoti Haimovsky /* Apply default TxRx configuration for all ports */ 1450fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 1451384161e0SShahaf Shuler port->dev_conf.rxmode = rx_mode; 14526f51deb9SIvan Ilchenko 14536f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 14546f51deb9SIvan Ilchenko if (ret != 0) 14556f51deb9SIvan Ilchenko rte_exit(EXIT_FAILURE, 14566f51deb9SIvan Ilchenko "rte_eth_dev_info_get() failed\n"); 14577c45f6c0SFerruh Yigit 14580c4abd36SSteve Yang ret = update_jumbo_frame_offload(pid); 14590c4abd36SSteve Yang if (ret != 0) 14600c4abd36SSteve Yang printf("Updating jumbo frame offload failed for port %u\n", 14610c4abd36SSteve Yang pid); 1462761c4d66SSteve Yang 146307e5f7bdSShahaf Shuler if (!(port->dev_info.tx_offload_capa & 146407e5f7bdSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 146507e5f7bdSShahaf Shuler port->dev_conf.txmode.offloads &= 146607e5f7bdSShahaf Shuler ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 1467b6ea6408SIntel if 
(numa_support) { 1468b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 1469b6ea6408SIntel port_per_socket[port_numa[pid]]++; 1470b6ea6408SIntel else { 1471b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 147220a0286fSLiu Xiaofeng 147329841336SPhil Yang /* 147429841336SPhil Yang * if socket_id is invalid, 147529841336SPhil Yang * set to the first available socket. 147629841336SPhil Yang */ 147720a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 147829841336SPhil Yang socket_id = socket_ids[0]; 1479b6ea6408SIntel port_per_socket[socket_id]++; 1480b6ea6408SIntel } 1481b6ea6408SIntel } 1482b6ea6408SIntel 1483c73a9071SWei Dai /* Apply Rx offloads configuration */ 1484c73a9071SWei Dai for (k = 0; k < port->dev_info.max_rx_queues; k++) 1485c73a9071SWei Dai port->rx_conf[k].offloads = 1486c73a9071SWei Dai port->dev_conf.rxmode.offloads; 1487c73a9071SWei Dai /* Apply Tx offloads configuration */ 1488c73a9071SWei Dai for (k = 0; k < port->dev_info.max_tx_queues; k++) 1489c73a9071SWei Dai port->tx_conf[k].offloads = 1490c73a9071SWei Dai port->dev_conf.txmode.offloads; 1491c73a9071SWei Dai 1492*b7b78a08SAjit Khaparde if (eth_link_speed) 1493*b7b78a08SAjit Khaparde port->dev_conf.link_speeds = eth_link_speed; 1494*b7b78a08SAjit Khaparde 1495ce8d5614SIntel /* set flag to initialize port/queue */ 1496ce8d5614SIntel port->need_reconfig = 1; 1497ce8d5614SIntel port->need_reconfig_queues = 1; 1498c18feafaSDekel Peled port->tx_metadata = 0; 149933f9630fSSunil Kumar Kori 150033f9630fSSunil Kumar Kori /* Check for maximum number of segments per MTU. Accordingly 150133f9630fSSunil Kumar Kori * update the mbuf data size. 150233f9630fSSunil Kumar Kori */ 1503163fbaafSFerruh Yigit if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1504163fbaafSFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 150533f9630fSSunil Kumar Kori data_size = rx_mode.max_rx_pkt_len / 150633f9630fSSunil Kumar Kori port->dev_info.rx_desc_lim.nb_mtu_seg_max; 150733f9630fSSunil Kumar Kori 150833f9630fSSunil Kumar Kori if ((data_size + RTE_PKTMBUF_HEADROOM) > 150926cbb419SViacheslav Ovsiienko mbuf_data_size[0]) { 151026cbb419SViacheslav Ovsiienko mbuf_data_size[0] = data_size + 151133f9630fSSunil Kumar Kori RTE_PKTMBUF_HEADROOM; 151233f9630fSSunil Kumar Kori warning = 1; 1513ce8d5614SIntel } 151433f9630fSSunil Kumar Kori } 151533f9630fSSunil Kumar Kori } 151633f9630fSSunil Kumar Kori 151733f9630fSSunil Kumar Kori if (warning) 151826cbb419SViacheslav Ovsiienko TESTPMD_LOG(WARNING, 151926cbb419SViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 152026cbb419SViacheslav Ovsiienko mbuf_data_size[0]); 15213ab64341SOlivier Matz /* 15223ab64341SOlivier Matz * Create pools of mbuf. 15233ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 15243ab64341SOlivier Matz * socket 0 memory by default. 15253ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 15263ab64341SOlivier Matz * 15273ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 15283ab64341SOlivier Matz * nb_txd can be configured at run time. 
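 *
 * (Illustrative sizing, assuming the testpmd defaults of
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 forwarding
 * lcores: 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, scaled by
 * RTE_MAX_ETHPORTS when param_total_num_mbufs is not given.)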
15293ab64341SOlivier Matz */ 15303ab64341SOlivier Matz if (param_total_num_mbufs) 15313ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 15323ab64341SOlivier Matz else { 15333ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 15343ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 15353ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 15363ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 15373ab64341SOlivier Matz } 15383ab64341SOlivier Matz 1539b6ea6408SIntel if (numa_support) { 154026cbb419SViacheslav Ovsiienko uint8_t i, j; 1541ce8d5614SIntel 1542c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 154326cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 154426cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 154526cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1546401b744dSShahaf Shuler nb_mbuf_per_pool, 154726cbb419SViacheslav Ovsiienko socket_ids[i], j); 15483ab64341SOlivier Matz } else { 154926cbb419SViacheslav Ovsiienko uint8_t i; 155026cbb419SViacheslav Ovsiienko 155126cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 155226cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 155326cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1554401b744dSShahaf Shuler nb_mbuf_per_pool, 155526cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 155626cbb419SViacheslav Ovsiienko 0 : socket_num, i); 15573ab64341SOlivier Matz } 1558b6ea6408SIntel 1559b6ea6408SIntel init_port_config(); 15605886ae07SAdrien Mazarguil 156152f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1562aaacd052SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 15635886ae07SAdrien Mazarguil /* 15645886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 15655886ae07SAdrien Mazarguil */ 15665886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 15678fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 156826cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 15698fd8bebcSAdrien Mazarguil 15705886ae07SAdrien Mazarguil if (mbp == NULL) 157126cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 15725886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 157352f38a20SJiayu Hu /* initialize GSO context */ 157452f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 157552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 157652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 157735b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 157835b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 157952f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 15805886ae07SAdrien Mazarguil } 15815886ae07SAdrien Mazarguil 1582ce8d5614SIntel /* Configuration of packet forwarding streams. 
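 * (nb_fwd_streams works out to nb_ports * max(nb_rxq, nb_txq);
 * see init_fwd_streams() below.)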
*/ 1583ce8d5614SIntel if (init_fwd_streams() < 0) 1584ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 15850c0db76fSBernard Iremonger 15860c0db76fSBernard Iremonger fwd_config_setup(); 1587b7091f1dSJiayu Hu 1588b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1589b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1590b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1591b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1592b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1593b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1594b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1595b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1596b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1597b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1598b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1599b7091f1dSJiayu Hu } 1600b7091f1dSJiayu Hu } 1601ce8d5614SIntel } 1602ce8d5614SIntel 16032950a769SDeclan Doherty 16042950a769SDeclan Doherty void 1605a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 16062950a769SDeclan Doherty { 16072950a769SDeclan Doherty struct rte_port *port; 16086f51deb9SIvan Ilchenko int ret; 16092950a769SDeclan Doherty 16102950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 16112950a769SDeclan Doherty port = &ports[new_port_id]; 16126f51deb9SIvan Ilchenko 16136f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info); 16146f51deb9SIvan Ilchenko if (ret != 0) 16156f51deb9SIvan Ilchenko return; 16162950a769SDeclan Doherty 16172950a769SDeclan Doherty /* set flag to initialize port/queue */ 16182950a769SDeclan Doherty port->need_reconfig = 1; 16192950a769SDeclan Doherty port->need_reconfig_queues = 1; 1620a21d5a4bSDeclan Doherty port->socket_id = socket_id; 16212950a769SDeclan Doherty 16222950a769SDeclan Doherty init_port_config(); 16232950a769SDeclan Doherty } 16242950a769SDeclan Doherty 16252950a769SDeclan Doherty 1626ce8d5614SIntel int 1627ce8d5614SIntel init_fwd_streams(void) 1628ce8d5614SIntel { 1629ce8d5614SIntel portid_t pid; 1630ce8d5614SIntel struct rte_port *port; 1631ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 16325a8fb55cSReshma Pattan queueid_t q; 1633ce8d5614SIntel 1634ce8d5614SIntel /* set socket id according to numa or not */ 16357d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1636ce8d5614SIntel port = &ports[pid]; 1637ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 1638ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 1639ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 1640ce8d5614SIntel port->dev_info.max_rx_queues); 1641ce8d5614SIntel return -1; 1642ce8d5614SIntel } 1643ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 1644ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 1645ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 1646ce8d5614SIntel port->dev_info.max_tx_queues); 1647ce8d5614SIntel return -1; 1648ce8d5614SIntel } 164920a0286fSLiu Xiaofeng if (numa_support) { 165020a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 165120a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 165220a0286fSLiu Xiaofeng else { 1653b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 165420a0286fSLiu Xiaofeng 165529841336SPhil Yang /* 165629841336SPhil Yang * if socket_id is invalid, 165729841336SPhil Yang * set to the first available socket. 
165829841336SPhil Yang */ 165920a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 166029841336SPhil Yang port->socket_id = socket_ids[0]; 166120a0286fSLiu Xiaofeng } 166220a0286fSLiu Xiaofeng } 1663b6ea6408SIntel else { 1664b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1665af75078fSIntel port->socket_id = 0; 1666b6ea6408SIntel else 1667b6ea6408SIntel port->socket_id = socket_num; 1668b6ea6408SIntel } 1669af75078fSIntel } 1670af75078fSIntel 16715a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 16725a8fb55cSReshma Pattan if (q == 0) { 16735a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 16745a8fb55cSReshma Pattan return -1; 16755a8fb55cSReshma Pattan } 16765a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1677ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1678ce8d5614SIntel return 0; 1679ce8d5614SIntel /* clear the old */ 1680ce8d5614SIntel if (fwd_streams != NULL) { 1681ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1682ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1683ce8d5614SIntel continue; 1684ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1685ce8d5614SIntel fwd_streams[sm_id] = NULL; 1686af75078fSIntel } 1687ce8d5614SIntel rte_free(fwd_streams); 1688ce8d5614SIntel fwd_streams = NULL; 1689ce8d5614SIntel } 1690ce8d5614SIntel 1691ce8d5614SIntel /* init new */ 1692ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 16931f84c469SMatan Azrad if (nb_fwd_streams) { 1694ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 16951f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 16961f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1697ce8d5614SIntel if (fwd_streams == NULL) 16981f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 16991f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 17001f84c469SMatan Azrad nb_fwd_streams); 1701ce8d5614SIntel 1702af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 17031f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 17041f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 17051f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1706ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 17071f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 17081f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 17091f84c469SMatan Azrad } 1710af75078fSIntel } 1711ce8d5614SIntel 1712ce8d5614SIntel return 0; 1713af75078fSIntel } 1714af75078fSIntel 1715af75078fSIntel static void 1716af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1717af75078fSIntel { 17187569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 171985de481aSHonnappa Nagarahalli uint64_t nb_burst; 17207569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 17217569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1722af75078fSIntel uint16_t nb_pkt; 17237569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 17247569b8c1SHonnappa Nagarahalli int i; 1725af75078fSIntel 1726af75078fSIntel /* 1727af75078fSIntel * First compute the total number of packet bursts and the 1728af75078fSIntel * two highest numbers of bursts of the same number of packets. 
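 *
 * (Illustrative: 20 empty polls, 900 bursts of 32 packets and 80
 * bursts of 16 packets over a run would be reported below as
 * "2% of 0 pkts + 90% of 32 pkts + 8% of 16 pkts".)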
1729af75078fSIntel */ 17307569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 17317569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 17327569b8c1SHonnappa Nagarahalli 17337569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 17347569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 17357569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 17367569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 17377569b8c1SHonnappa Nagarahalli 17387569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. */ 17397569b8c1SHonnappa Nagarahalli for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1740af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 17417569b8c1SHonnappa Nagarahalli 1742af75078fSIntel if (nb_burst == 0) 1743af75078fSIntel continue; 17447569b8c1SHonnappa Nagarahalli 1745af75078fSIntel total_burst += nb_burst; 17467569b8c1SHonnappa Nagarahalli 17477569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 17487569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 17497569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1750fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1751fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 17527569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 17537569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 17547569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1755af75078fSIntel } 1756af75078fSIntel } 1757af75078fSIntel if (total_burst == 0) 1758af75078fSIntel return; 17597569b8c1SHonnappa Nagarahalli 17607569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 17617569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 17627569b8c1SHonnappa Nagarahalli if (i == 3) { 17637569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1764af75078fSIntel return; 1765af75078fSIntel } 17667569b8c1SHonnappa Nagarahalli 17677569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 17687569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 17697569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 17707569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1771af75078fSIntel return; 1772af75078fSIntel } 17737569b8c1SHonnappa Nagarahalli 17747569b8c1SHonnappa Nagarahalli burst_percent[i] = 17757569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 17767569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 17777569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 17787569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1779af75078fSIntel } 1780af75078fSIntel } 1781af75078fSIntel 1782af75078fSIntel static void 1783af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1784af75078fSIntel { 1785af75078fSIntel struct fwd_stream *fs; 1786af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1787af75078fSIntel 1788af75078fSIntel fs = fwd_streams[stream_id]; 1789af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1790af75078fSIntel (fs->fwd_dropped == 0)) 1791af75078fSIntel return; 1792af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1793af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1794af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1795af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1796c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 
1797c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64,
1798af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1799af75078fSIntel 
1800af75078fSIntel /* if checksum mode */
1801af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) {
1802c185d42cSDavid Marchand printf("  RX- bad IP checksum: %-14"PRIu64
1803c185d42cSDavid Marchand "  RX- bad L4 checksum: %-14"PRIu64
1804c185d42cSDavid Marchand " RX- bad outer L4 checksum: %-14"PRIu64"\n",
180558d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
180658d475b7SJerin Jacob fs->rx_bad_outer_l4_csum);
1807d139cf23SLance Richardson printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1808d139cf23SLance Richardson fs->rx_bad_outer_ip_csum);
180994d65546SDavid Marchand } else {
181094d65546SDavid Marchand printf("\n");
1811af75078fSIntel }
1812af75078fSIntel 
18130e4b1963SDharmik Thakkar if (record_burst_stats) {
1814af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1815af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats);
18160e4b1963SDharmik Thakkar }
1817af75078fSIntel }
1818af75078fSIntel 
181953324971SDavid Marchand void
182053324971SDavid Marchand fwd_stats_display(void)
182153324971SDavid Marchand {
182253324971SDavid Marchand static const char *fwd_stats_border = "----------------------";
182353324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++";
182453324971SDavid Marchand struct {
182553324971SDavid Marchand struct fwd_stream *rx_stream;
182653324971SDavid Marchand struct fwd_stream *tx_stream;
182753324971SDavid Marchand uint64_t tx_dropped;
182853324971SDavid Marchand uint64_t rx_bad_ip_csum;
182953324971SDavid Marchand uint64_t rx_bad_l4_csum;
183053324971SDavid Marchand uint64_t rx_bad_outer_l4_csum;
1831d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum;
183253324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS];
183353324971SDavid Marchand uint64_t total_rx_dropped = 0;
183453324971SDavid Marchand uint64_t total_tx_dropped = 0;
183553324971SDavid Marchand uint64_t total_rx_nombuf = 0;
183653324971SDavid Marchand struct rte_eth_stats stats;
183753324971SDavid Marchand uint64_t fwd_cycles = 0;
183853324971SDavid Marchand uint64_t total_recv = 0;
183953324971SDavid Marchand uint64_t total_xmit = 0;
184053324971SDavid Marchand struct rte_port *port;
184153324971SDavid Marchand streamid_t sm_id;
184253324971SDavid Marchand portid_t pt_id;
184353324971SDavid Marchand int i;
184453324971SDavid Marchand 
184553324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats));
184653324971SDavid Marchand 
184753324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
184853324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id];
184953324971SDavid Marchand 
185053324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams >
185153324971SDavid Marchand cur_fwd_config.nb_fwd_ports) {
185253324971SDavid Marchand fwd_stream_stats_display(sm_id);
185353324971SDavid Marchand } else {
185453324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs;
185553324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs;
185653324971SDavid Marchand }
185753324971SDavid Marchand 
185853324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
185953324971SDavid Marchand 
186053324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
186153324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
186253324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
186353324971SDavid Marchand fs->rx_bad_outer_l4_csum; 1864d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 1865d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 186653324971SDavid Marchand 1867bc700b67SDharmik Thakkar if (record_core_cycles) 186853324971SDavid Marchand fwd_cycles += fs->core_cycles; 186953324971SDavid Marchand } 187053324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 187153324971SDavid Marchand pt_id = fwd_ports_ids[i]; 187253324971SDavid Marchand port = &ports[pt_id]; 187353324971SDavid Marchand 187453324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 187553324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 187653324971SDavid Marchand stats.opackets -= port->stats.opackets; 187753324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 187853324971SDavid Marchand stats.obytes -= port->stats.obytes; 187953324971SDavid Marchand stats.imissed -= port->stats.imissed; 188053324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 188153324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 188253324971SDavid Marchand 188353324971SDavid Marchand total_recv += stats.ipackets; 188453324971SDavid Marchand total_xmit += stats.opackets; 188553324971SDavid Marchand total_rx_dropped += stats.imissed; 188653324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 188753324971SDavid Marchand total_tx_dropped += stats.oerrors; 188853324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 188953324971SDavid Marchand 189053324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 189153324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 189253324971SDavid Marchand 189308dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 189408dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 189553324971SDavid Marchand stats.ipackets + stats.imissed); 189653324971SDavid Marchand 1897d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 189853324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 189953324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 190053324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 190153324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 190253324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 190353324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 1904d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 1905d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 1906d139cf23SLance Richardson } 190753324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 190808dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 190908dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 191053324971SDavid Marchand } 191153324971SDavid Marchand 191208dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 191353324971SDavid Marchand "TX-total: %-"PRIu64"\n", 191453324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 191553324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 191653324971SDavid Marchand 19170e4b1963SDharmik Thakkar if (record_burst_stats) { 191853324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 191953324971SDavid Marchand pkt_burst_stats_display("RX", 192053324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 192153324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 192253324971SDavid Marchand 
pkt_burst_stats_display("TX", 192353324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 19240e4b1963SDharmik Thakkar } 192553324971SDavid Marchand 192653324971SDavid Marchand printf(" %s--------------------------------%s\n", 192753324971SDavid Marchand fwd_stats_border, fwd_stats_border); 192853324971SDavid Marchand } 192953324971SDavid Marchand 193053324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 193153324971SDavid Marchand "%s\n", 193253324971SDavid Marchand acc_stats_border, acc_stats_border); 193353324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 193453324971SDavid Marchand "%-"PRIu64"\n" 193553324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 193653324971SDavid Marchand "%-"PRIu64"\n", 193753324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 193853324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 193953324971SDavid Marchand if (total_rx_nombuf > 0) 194053324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 194153324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 194253324971SDavid Marchand "%s\n", 194353324971SDavid Marchand acc_stats_border, acc_stats_border); 1944bc700b67SDharmik Thakkar if (record_core_cycles) { 19454c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 19463a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 19473a164e00SPhil Yang uint64_t total_pkts = 0; 19483a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 19493a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 19503a164e00SPhil Yang total_pkts = total_xmit; 19513a164e00SPhil Yang else 19523a164e00SPhil Yang total_pkts = total_recv; 19533a164e00SPhil Yang 19541920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 19553a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 19564c0497b1SDharmik Thakkar " MHz Clock\n", 19573a164e00SPhil Yang (double) fwd_cycles / total_pkts, 19583a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 19594c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 19603a164e00SPhil Yang } 1961bc700b67SDharmik Thakkar } 196253324971SDavid Marchand } 196353324971SDavid Marchand 196453324971SDavid Marchand void 196553324971SDavid Marchand fwd_stats_reset(void) 196653324971SDavid Marchand { 196753324971SDavid Marchand streamid_t sm_id; 196853324971SDavid Marchand portid_t pt_id; 196953324971SDavid Marchand int i; 197053324971SDavid Marchand 197153324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 197253324971SDavid Marchand pt_id = fwd_ports_ids[i]; 197353324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 197453324971SDavid Marchand } 197553324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 197653324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 197753324971SDavid Marchand 197853324971SDavid Marchand fs->rx_packets = 0; 197953324971SDavid Marchand fs->tx_packets = 0; 198053324971SDavid Marchand fs->fwd_dropped = 0; 198153324971SDavid Marchand fs->rx_bad_ip_csum = 0; 198253324971SDavid Marchand fs->rx_bad_l4_csum = 0; 198353324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 1984d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 198553324971SDavid Marchand 198653324971SDavid Marchand memset(&fs->rx_burst_stats, 0, 
sizeof(fs->rx_burst_stats));
198753324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
198853324971SDavid Marchand fs->core_cycles = 0;
198953324971SDavid Marchand }
199053324971SDavid Marchand }
199153324971SDavid Marchand 
1992af75078fSIntel static void
19937741e4cfSIntel flush_fwd_rx_queues(void)
1994af75078fSIntel {
1995af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1996af75078fSIntel portid_t rxp;
19977741e4cfSIntel portid_t port_id;
1998af75078fSIntel queueid_t rxq;
1999af75078fSIntel uint16_t nb_rx;
2000af75078fSIntel uint16_t i;
2001af75078fSIntel uint8_t j;
2002f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2003594302c7SJames Poole uint64_t timer_period;
2004f487715fSReshma Pattan 
2005f487715fSReshma Pattan /* convert to number of cycles */
2006594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */
2007af75078fSIntel 
2008af75078fSIntel for (j = 0; j < 2; j++) {
20097741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2010af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
20117741e4cfSIntel port_id = fwd_ports_ids[rxp];
2012f487715fSReshma Pattan /**
2013f487715fSReshma Pattan  * testpmd can get stuck in the below do-while loop
2014f487715fSReshma Pattan  * if rte_eth_rx_burst() always returns nonzero
2015f487715fSReshma Pattan  * packets. So a timer is added to exit this loop
2016f487715fSReshma Pattan  * after the 1 second timer expires.
2017f487715fSReshma Pattan  */
2018f487715fSReshma Pattan prev_tsc = rte_rdtsc();
2019af75078fSIntel do {
20207741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq,
2021013af9b6SIntel pkts_burst, MAX_PKT_BURST);
2022af75078fSIntel for (i = 0; i < nb_rx; i++)
2023af75078fSIntel rte_pktmbuf_free(pkts_burst[i]);
2024f487715fSReshma Pattan 
2025f487715fSReshma Pattan cur_tsc = rte_rdtsc();
2026f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc;
2027f487715fSReshma Pattan timer_tsc += diff_tsc;
2028f487715fSReshma Pattan } while ((nb_rx > 0) &&
2029f487715fSReshma Pattan (timer_tsc < timer_period));
2030f487715fSReshma Pattan timer_tsc = 0;
2031af75078fSIntel }
2032af75078fSIntel }
2033af75078fSIntel rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2034af75078fSIntel }
2035af75078fSIntel }
2036af75078fSIntel 
2037af75078fSIntel static void
2038af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2039af75078fSIntel {
2040af75078fSIntel struct fwd_stream **fsm;
2041af75078fSIntel streamid_t nb_fs;
2042af75078fSIntel streamid_t sm_id;
2043a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
20447e4441c8SRemy Horton uint64_t tics_per_1sec;
20457e4441c8SRemy Horton uint64_t tics_datum;
20467e4441c8SRemy Horton uint64_t tics_current;
20474918a357SXiaoyun Li uint16_t i, cnt_ports;
2048af75078fSIntel 
20494918a357SXiaoyun Li cnt_ports = nb_ports;
20507e4441c8SRemy Horton tics_datum = rte_rdtsc();
20517e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz();
20527e4441c8SRemy Horton #endif
2053af75078fSIntel fsm = &fwd_streams[fc->stream_idx];
2054af75078fSIntel nb_fs = fc->stream_nb;
2055af75078fSIntel do {
2056af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++)
2057af75078fSIntel (*pkt_fwd)(fsm[sm_id]);
2058a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2059e25e6c70SRemy Horton if (bitrate_enabled != 0 &&
2060e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) {
20617e4441c8SRemy Horton tics_current = rte_rdtsc();
20627e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) {
20637e4441c8SRemy
Horton /* Periodic bitrate calculation */ 20644918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2065e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 20664918a357SXiaoyun Li ports_ids[i]); 20677e4441c8SRemy Horton tics_datum = tics_current; 20687e4441c8SRemy Horton } 2069e25e6c70SRemy Horton } 20707e4441c8SRemy Horton #endif 2071a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 207265eb1e54SPablo de Lara if (latencystats_enabled != 0 && 207365eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 207462d3216dSReshma Pattan rte_latencystats_update(); 207562d3216dSReshma Pattan #endif 207662d3216dSReshma Pattan 2077af75078fSIntel } while (! fc->stopped); 2078af75078fSIntel } 2079af75078fSIntel 2080af75078fSIntel static int 2081af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2082af75078fSIntel { 2083af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2084af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2085af75078fSIntel return 0; 2086af75078fSIntel } 2087af75078fSIntel 2088af75078fSIntel /* 2089af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2090af75078fSIntel * Used to start communication flows in network loopback test configurations. 2091af75078fSIntel */ 2092af75078fSIntel static int 2093af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2094af75078fSIntel { 2095af75078fSIntel struct fwd_lcore *fwd_lc; 2096af75078fSIntel struct fwd_lcore tmp_lcore; 2097af75078fSIntel 2098af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2099af75078fSIntel tmp_lcore = *fwd_lc; 2100af75078fSIntel tmp_lcore.stopped = 1; 2101af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2102af75078fSIntel return 0; 2103af75078fSIntel } 2104af75078fSIntel 2105af75078fSIntel /* 2106af75078fSIntel * Launch packet forwarding: 2107af75078fSIntel * - Setup per-port forwarding context. 2108af75078fSIntel * - launch logical cores with their forwarding configuration. 2109af75078fSIntel */ 2110af75078fSIntel static void 2111af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2112af75078fSIntel { 2113af75078fSIntel port_fwd_begin_t port_fwd_begin; 2114af75078fSIntel unsigned int i; 2115af75078fSIntel unsigned int lc_id; 2116af75078fSIntel int diag; 2117af75078fSIntel 2118af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2119af75078fSIntel if (port_fwd_begin != NULL) { 2120af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2121af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 2122af75078fSIntel } 2123af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2124af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2125af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2126af75078fSIntel fwd_lcores[i]->stopped = 0; 2127af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2128af75078fSIntel fwd_lcores[i], lc_id); 2129af75078fSIntel if (diag != 0) 2130af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 2131af75078fSIntel lc_id, diag); 2132af75078fSIntel } 2133af75078fSIntel } 2134af75078fSIntel } 2135af75078fSIntel 2136af75078fSIntel /* 2137af75078fSIntel * Launch packet forwarding configuration. 
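 * (This is the entry point behind the interactive "start" and
 * "start tx_first" commands; with_tx_first carries the number of
 * initial tx_only bursts to send before normal forwarding begins.)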
2138af75078fSIntel  */
2139af75078fSIntel void
2140af75078fSIntel start_packet_forwarding(int with_tx_first)
2141af75078fSIntel {
2142af75078fSIntel port_fwd_begin_t port_fwd_begin;
2143af75078fSIntel port_fwd_end_t port_fwd_end;
2144af75078fSIntel struct rte_port *port;
2145af75078fSIntel unsigned int i;
2146af75078fSIntel portid_t pt_id;
2147af75078fSIntel 
21485a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
21495a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
21505a8fb55cSReshma Pattan 
21515a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
21525a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
21535a8fb55cSReshma Pattan 
21545a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
21555a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
21565a8fb55cSReshma Pattan (!nb_rxq || !nb_txq))
21575a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE,
21585a8fb55cSReshma Pattan "Either rxq or txq is 0, cannot use %s fwd mode\n",
21595a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name);
21605a8fb55cSReshma Pattan 
2161ce8d5614SIntel if (all_ports_started() == 0) {
2162ce8d5614SIntel printf("Not all ports were started\n");
2163ce8d5614SIntel return;
2164ce8d5614SIntel }
2165af75078fSIntel if (test_done == 0) {
2166af75078fSIntel printf("Packet forwarding already started\n");
2167af75078fSIntel return;
2168af75078fSIntel }
2169edf87b4aSBernard Iremonger 
2170edf87b4aSBernard Iremonger 
21717741e4cfSIntel if (dcb_test) {
21727741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) {
21737741e4cfSIntel pt_id = fwd_ports_ids[i];
21747741e4cfSIntel port = &ports[pt_id];
21757741e4cfSIntel if (!port->dcb_flag) {
21767741e4cfSIntel printf("In DCB mode, all forwarding ports must "
21777741e4cfSIntel "be configured in this mode.\n");
2178013af9b6SIntel return;
2179013af9b6SIntel }
21807741e4cfSIntel }
21817741e4cfSIntel if (nb_fwd_lcores == 1) {
21827741e4cfSIntel printf("In DCB mode, the number of forwarding cores "
21837741e4cfSIntel "should be larger than 1.\n");
21847741e4cfSIntel return;
21857741e4cfSIntel }
21867741e4cfSIntel }
2187af75078fSIntel test_done = 0;
21887741e4cfSIntel 
218947a767b2SMatan Azrad fwd_config_setup();
219047a767b2SMatan Azrad 
21917741e4cfSIntel if (!no_flush_rx)
21927741e4cfSIntel flush_fwd_rx_queues();
21937741e4cfSIntel 
2194933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config);
2195af75078fSIntel rxtx_config_display();
2196af75078fSIntel 
219753324971SDavid Marchand fwd_stats_reset();
2198af75078fSIntel if (with_tx_first) {
2199af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin;
2200af75078fSIntel if (port_fwd_begin != NULL) {
2201af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2202af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]);
2203af75078fSIntel }
2204acbf77a6SZhihong Wang while (with_tx_first--) {
2205acbf77a6SZhihong Wang launch_packet_forwarding(
2206acbf77a6SZhihong Wang run_one_txonly_burst_on_core);
2207af75078fSIntel rte_eal_mp_wait_lcore();
2208acbf77a6SZhihong Wang }
2209af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end;
2210af75078fSIntel if (port_fwd_end != NULL) {
2211af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2212af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]);
2213af75078fSIntel }
2214af75078fSIntel }
2215af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core);
2216af75078fSIntel }
2217af75078fSIntel 
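/*
 * Illustrative usage sketch (the calls are the functions defined in
 * this file; the sequencing assumes the usual interactive flow):
 *
 *	start_packet_forwarding(0);	launched by the "start" command
 *	...traffic runs on the forwarding lcores...
 *	stop_packet_forwarding();	launched by the "stop" command
 *
 * stop_packet_forwarding() below signals the lcores to stop, waits
 * for them and prints the accumulated statistics.
 */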
2218af75078fSIntel void
2219af75078fSIntel stop_packet_forwarding(void)
2220af75078fSIntel {
2221af75078fSIntel port_fwd_end_t port_fwd_end;
2222af75078fSIntel lcoreid_t lc_id;
222353324971SDavid Marchand portid_t pt_id;
222453324971SDavid Marchand int i;
2225af75078fSIntel 
2226af75078fSIntel if (test_done) {
2227af75078fSIntel printf("Packet forwarding not started\n");
2228af75078fSIntel return;
2229af75078fSIntel }
2230af75078fSIntel printf("Telling cores to stop...");
2231af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2232af75078fSIntel fwd_lcores[lc_id]->stopped = 1;
2233af75078fSIntel printf("\nWaiting for lcores to finish...\n");
2234af75078fSIntel rte_eal_mp_wait_lcore();
2235af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2236af75078fSIntel if (port_fwd_end != NULL) {
2237af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2238af75078fSIntel pt_id = fwd_ports_ids[i];
2239af75078fSIntel (*port_fwd_end)(pt_id);
2240af75078fSIntel }
2241af75078fSIntel }
2242c185d42cSDavid Marchand 
224353324971SDavid Marchand fwd_stats_display();
224458d475b7SJerin Jacob 
2245af75078fSIntel printf("\nDone.\n");
2246af75078fSIntel test_done = 1;
2247af75078fSIntel }
2248af75078fSIntel 
2249cfae07fdSOuyang Changchun void
2250cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2251cfae07fdSOuyang Changchun {
2252492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0)
2253cfae07fdSOuyang Changchun printf("\nSet link up failed.\n");
2254cfae07fdSOuyang Changchun }
2255cfae07fdSOuyang Changchun 
2256cfae07fdSOuyang Changchun void
2257cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2258cfae07fdSOuyang Changchun {
2259492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0)
2260cfae07fdSOuyang Changchun printf("\nSet link down failed.\n");
2261cfae07fdSOuyang Changchun }
2262cfae07fdSOuyang Changchun 
2263ce8d5614SIntel static int
2264ce8d5614SIntel all_ports_started(void)
2265ce8d5614SIntel {
2266ce8d5614SIntel portid_t pi;
2267ce8d5614SIntel struct rte_port *port;
2268ce8d5614SIntel 
22697d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) {
2270ce8d5614SIntel port = &ports[pi];
2271ce8d5614SIntel /* Check if there is a port which is not started */
227241b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) &&
227341b05095SBernard Iremonger (port->slave_flag == 0))
2274ce8d5614SIntel return 0;
2275ce8d5614SIntel }
2276ce8d5614SIntel 
2277ce8d5614SIntel /* All ports are started */
2278ce8d5614SIntel return 1;
2279ce8d5614SIntel }
2280ce8d5614SIntel 
2281148f963fSBruce Richardson int
22826018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
22836018eb8cSShahaf Shuler {
22846018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id];
22856018eb8cSShahaf Shuler 
22866018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) &&
22876018eb8cSShahaf Shuler (port->slave_flag == 0))
22886018eb8cSShahaf Shuler return 0;
22896018eb8cSShahaf Shuler return 1;
22906018eb8cSShahaf Shuler }
22916018eb8cSShahaf Shuler 
22926018eb8cSShahaf Shuler int
2293edab33b1STetsuya Mukawa all_ports_stopped(void)
2294edab33b1STetsuya Mukawa {
2295edab33b1STetsuya Mukawa portid_t pi;
2296edab33b1STetsuya Mukawa 
22977d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) {
22986018eb8cSShahaf Shuler if (!port_is_stopped(pi))
2299edab33b1STetsuya Mukawa return 0;
2300edab33b1STetsuya Mukawa }
2301edab33b1STetsuya Mukawa 
2302edab33b1STetsuya Mukawa return 1;
2303edab33b1STetsuya Mukawa }
2304edab33b1STetsuya Mukawa 
2305edab33b1STetsuya Mukawa int
2306edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2307edab33b1STetsuya Mukawa { 2308edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2309edab33b1STetsuya Mukawa return 0; 2310edab33b1STetsuya Mukawa 2311edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2312edab33b1STetsuya Mukawa return 0; 2313edab33b1STetsuya Mukawa 2314edab33b1STetsuya Mukawa return 1; 2315edab33b1STetsuya Mukawa } 2316edab33b1STetsuya Mukawa 23171c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */ 23181c69df45SOri Kam static int 231901817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 23201c69df45SOri Kam { 23211c69df45SOri Kam queueid_t qi; 23221c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 23231c69df45SOri Kam .peer_count = 1, 23241c69df45SOri Kam }; 23251c69df45SOri Kam int i; 23261c69df45SOri Kam int diag; 23271c69df45SOri Kam struct rte_port *port = &ports[pi]; 232801817b10SBing Zhao uint16_t peer_rx_port = pi; 232901817b10SBing Zhao uint16_t peer_tx_port = pi; 233001817b10SBing Zhao uint32_t manual = 1; 233101817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 233201817b10SBing Zhao 233301817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 233401817b10SBing Zhao peer_rx_port = pi; 233501817b10SBing Zhao peer_tx_port = pi; 233601817b10SBing Zhao manual = 0; 233701817b10SBing Zhao } else if (hairpin_mode & 0x1) { 233801817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 233901817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 234001817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 234101817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 234201817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 234301817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 234401817b10SBing Zhao peer_rx_port = p_pi; 234501817b10SBing Zhao } else { 234601817b10SBing Zhao uint16_t next_pi; 234701817b10SBing Zhao 234801817b10SBing Zhao /* Last port will be the peer RX port of the first. 
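 * (With hairpin_mode bit 0 the ports are thus chained into one ring:
 * each port's hairpin Rx queues are bound to the Tx queues of the
 * next port, and the first port wraps around to the last.)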
 */
234901817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi)
235001817b10SBing Zhao peer_rx_port = next_pi;
235101817b10SBing Zhao }
235201817b10SBing Zhao manual = 1;
235301817b10SBing Zhao } else if (hairpin_mode & 0x2) {
235401817b10SBing Zhao if (cnt_pi & 0x1) {
235501817b10SBing Zhao peer_rx_port = p_pi;
235601817b10SBing Zhao } else {
235701817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
235801817b10SBing Zhao RTE_ETH_DEV_NO_OWNER);
235901817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS)
236001817b10SBing Zhao peer_rx_port = pi;
236101817b10SBing Zhao }
236201817b10SBing Zhao peer_tx_port = peer_rx_port;
236301817b10SBing Zhao manual = 1;
236401817b10SBing Zhao }
23651c69df45SOri Kam 
23661c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
236701817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port;
23681c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq;
236901817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
237001817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
23711c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup
23721c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf);
23731c69df45SOri Kam i++;
23741c69df45SOri Kam if (diag == 0)
23751c69df45SOri Kam continue;
23761c69df45SOri Kam 
23771c69df45SOri Kam /* Fail to setup tx queue, return */
23781c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status),
23791c69df45SOri Kam RTE_PORT_HANDLING,
23801c69df45SOri Kam RTE_PORT_STOPPED) == 0)
23811c69df45SOri Kam printf("Port %d can not be set back "
23821c69df45SOri Kam "to stopped\n", pi);
23831c69df45SOri Kam printf("Fail to configure port %d hairpin "
23841c69df45SOri Kam "queues\n", pi);
23851c69df45SOri Kam /* try to reconfigure queues next time */
23861c69df45SOri Kam port->need_reconfig_queues = 1;
23871c69df45SOri Kam return -1;
23881c69df45SOri Kam }
23891c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
239001817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port;
23911c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq;
239201817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
239301817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
23941c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup
23951c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf);
23961c69df45SOri Kam i++;
23971c69df45SOri Kam if (diag == 0)
23981c69df45SOri Kam continue;
23991c69df45SOri Kam 
24001c69df45SOri Kam /* Fail to setup rx queue, return */
24011c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status),
24021c69df45SOri Kam RTE_PORT_HANDLING,
24031c69df45SOri Kam RTE_PORT_STOPPED) == 0)
24041c69df45SOri Kam printf("Port %d can not be set back "
24051c69df45SOri Kam "to stopped\n", pi);
24061c69df45SOri Kam printf("Fail to configure port %d hairpin "
24071c69df45SOri Kam "queues\n", pi);
24081c69df45SOri Kam /* try to reconfigure queues next time */
24091c69df45SOri Kam port->need_reconfig_queues = 1;
24101c69df45SOri Kam return -1;
24111c69df45SOri Kam }
24121c69df45SOri Kam return 0;
24131c69df45SOri Kam }
24141c69df45SOri Kam 
24152befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split.
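 * (Illustrative, assuming the --mbuf-size=2048,2048 and
 * --rxpkts=64,1454 command-line options together with the
 * buffer-split Rx offload: rx_pkt_nb_segs is then 2, so each
 * rx_useg[] entry below receives its own length, offset and
 * mempool before the queue setup call.)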
 */
24162befc67fSViacheslav Ovsiienko int
24172befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
24182befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id,
24192befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
24202befc67fSViacheslav Ovsiienko {
24212befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
24222befc67fSViacheslav Ovsiienko unsigned int i, mp_n;
24232befc67fSViacheslav Ovsiienko int ret;
24242befc67fSViacheslav Ovsiienko 
24252befc67fSViacheslav Ovsiienko if (rx_pkt_nb_segs <= 1 ||
24262befc67fSViacheslav Ovsiienko (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
24272befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL;
24282befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0;
24292befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
24302befc67fSViacheslav Ovsiienko nb_rx_desc, socket_id,
24312befc67fSViacheslav Ovsiienko rx_conf, mp);
24322befc67fSViacheslav Ovsiienko return ret;
24332befc67fSViacheslav Ovsiienko }
24342befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) {
24352befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
24362befc67fSViacheslav Ovsiienko struct rte_mempool *mpx;
24372befc67fSViacheslav Ovsiienko /*
24382befc67fSViacheslav Ovsiienko  * Use last valid pool for the segments with number
24392befc67fSViacheslav Ovsiienko  * exceeding the pool index.
24402befc67fSViacheslav Ovsiienko  */
24412befc67fSViacheslav Ovsiienko mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
24422befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n);
24432befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */
24442befc67fSViacheslav Ovsiienko rx_seg->length = rx_pkt_seg_lengths[i] ?
24452befc67fSViacheslav Ovsiienko rx_pkt_seg_lengths[i] :
24462befc67fSViacheslav Ovsiienko mbuf_data_size[mp_n];
24472befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ?
24482befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0;
24492befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ?
mpx : mp; 24502befc67fSViacheslav Ovsiienko } 24512befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 24522befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 24532befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 24542befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 24552befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 24562befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 24572befc67fSViacheslav Ovsiienko return ret; 24582befc67fSViacheslav Ovsiienko } 24592befc67fSViacheslav Ovsiienko 2460edab33b1STetsuya Mukawa int 2461ce8d5614SIntel start_port(portid_t pid) 2462ce8d5614SIntel { 246392d2703eSMichael Qiu int diag, need_check_link_status = -1; 2464ce8d5614SIntel portid_t pi; 246501817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 246601817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 246701817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 246801817b10SBing Zhao uint16_t cnt_pi = 0; 246901817b10SBing Zhao uint16_t cfg_pi = 0; 247001817b10SBing Zhao int peer_pi; 2471ce8d5614SIntel queueid_t qi; 2472ce8d5614SIntel struct rte_port *port; 24736d13ea8eSOlivier Matz struct rte_ether_addr mac_addr; 24741c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2475ce8d5614SIntel 24764468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 24774468635fSMichael Qiu return 0; 24784468635fSMichael Qiu 2479ce8d5614SIntel if(dcb_config) 2480ce8d5614SIntel dcb_test = 1; 24817d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2482edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2483ce8d5614SIntel continue; 2484ce8d5614SIntel 248592d2703eSMichael Qiu need_check_link_status = 0; 2486ce8d5614SIntel port = &ports[pi]; 2487ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 2488ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 2489ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 2490ce8d5614SIntel continue; 2491ce8d5614SIntel } 2492ce8d5614SIntel 2493ce8d5614SIntel if (port->need_reconfig > 0) { 2494ce8d5614SIntel port->need_reconfig = 0; 2495ce8d5614SIntel 24967ee3e944SVasily Philipov if (flow_isolate_all) { 24977ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 24987ee3e944SVasily Philipov if (ret) { 24997ee3e944SVasily Philipov printf("Failed to apply isolated" 25007ee3e944SVasily Philipov " mode on port %d\n", pi); 25017ee3e944SVasily Philipov return -1; 25027ee3e944SVasily Philipov } 25037ee3e944SVasily Philipov } 2504b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 25055706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 250620a0286fSLiu Xiaofeng port->socket_id); 25071c69df45SOri Kam if (nb_hairpinq > 0 && 25081c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 25091c69df45SOri Kam printf("Port %d doesn't support hairpin " 25101c69df45SOri Kam "queues\n", pi); 25111c69df45SOri Kam return -1; 25121c69df45SOri Kam } 2513ce8d5614SIntel /* configure port */ 25141c69df45SOri Kam diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq, 25151c69df45SOri Kam nb_txq + nb_hairpinq, 2516ce8d5614SIntel &(port->dev_conf)); 2517ce8d5614SIntel if (diag != 0) { 2518ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2519ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2520ce8d5614SIntel printf("Port %d can not be set back " 2521ce8d5614SIntel "to stopped\n", pi); 2522ce8d5614SIntel printf("Fail to configure port %d\n", pi); 2523ce8d5614SIntel /* try to reconfigure port next time */ 2524ce8d5614SIntel port->need_reconfig = 1; 
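			/*
			 * Editorial note (not upstream text): leaving
			 * need_reconfig set means the next start_port() call
			 * re-enters the rte_eth_dev_configure() path above, so
			 * a failed configuration can simply be retried after
			 * the offending parameter has been corrected.
			 */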
2525148f963fSBruce Richardson return -1; 2526ce8d5614SIntel } 2527ce8d5614SIntel } 2528ce8d5614SIntel if (port->need_reconfig_queues > 0) { 2529ce8d5614SIntel port->need_reconfig_queues = 0; 2530ce8d5614SIntel /* setup tx queues */ 2531ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 2532b6ea6408SIntel if ((numa_support) && 2533b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2534b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2535d44f8a48SQi Zhang port->nb_tx_desc[qi], 2536d44f8a48SQi Zhang txring_numa[pi], 2537d44f8a48SQi Zhang &(port->tx_conf[qi])); 2538b6ea6408SIntel else 2539b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2540d44f8a48SQi Zhang port->nb_tx_desc[qi], 2541d44f8a48SQi Zhang port->socket_id, 2542d44f8a48SQi Zhang &(port->tx_conf[qi])); 2543b6ea6408SIntel 2544ce8d5614SIntel if (diag == 0) 2545ce8d5614SIntel continue; 2546ce8d5614SIntel 2547ce8d5614SIntel /* Fail to setup tx queue, return */ 2548ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2549ce8d5614SIntel RTE_PORT_HANDLING, 2550ce8d5614SIntel RTE_PORT_STOPPED) == 0) 2551ce8d5614SIntel printf("Port %d can not be set back " 2552ce8d5614SIntel "to stopped\n", pi); 2553d44f8a48SQi Zhang printf("Fail to configure port %d tx queues\n", 2554d44f8a48SQi Zhang pi); 2555ce8d5614SIntel /* try to reconfigure queues next time */ 2556ce8d5614SIntel port->need_reconfig_queues = 1; 2557148f963fSBruce Richardson return -1; 2558ce8d5614SIntel } 2559ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2560d44f8a48SQi Zhang /* setup rx queues */ 2561b6ea6408SIntel if ((numa_support) && 2562b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2563b6ea6408SIntel struct rte_mempool * mp = 256426cbb419SViacheslav Ovsiienko mbuf_pool_find 256526cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2566b6ea6408SIntel if (mp == NULL) { 2567b6ea6408SIntel printf("Failed to setup RX queue:" 2568b6ea6408SIntel "No mempool allocation" 2569b6ea6408SIntel " on the socket %d\n", 2570b6ea6408SIntel rxring_numa[pi]); 2571148f963fSBruce Richardson return -1; 2572b6ea6408SIntel } 2573b6ea6408SIntel 25742befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2575d4930794SFerruh Yigit port->nb_rx_desc[qi], 2576d44f8a48SQi Zhang rxring_numa[pi], 2577d44f8a48SQi Zhang &(port->rx_conf[qi]), 2578d44f8a48SQi Zhang mp); 25791e1d6bddSBernard Iremonger } else { 25801e1d6bddSBernard Iremonger struct rte_mempool *mp = 258126cbb419SViacheslav Ovsiienko mbuf_pool_find 258226cbb419SViacheslav Ovsiienko (port->socket_id, 0); 25831e1d6bddSBernard Iremonger if (mp == NULL) { 25841e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 25851e1d6bddSBernard Iremonger "No mempool allocation" 25861e1d6bddSBernard Iremonger " on the socket %d\n", 25871e1d6bddSBernard Iremonger port->socket_id); 25881e1d6bddSBernard Iremonger return -1; 2589b6ea6408SIntel } 25902befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2591d4930794SFerruh Yigit port->nb_rx_desc[qi], 2592d44f8a48SQi Zhang port->socket_id, 2593d44f8a48SQi Zhang &(port->rx_conf[qi]), 2594d44f8a48SQi Zhang mp); 25951e1d6bddSBernard Iremonger } 2596ce8d5614SIntel if (diag == 0) 2597ce8d5614SIntel continue; 2598ce8d5614SIntel 2599ce8d5614SIntel /* Fail to setup rx queue, return */ 2600ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2601ce8d5614SIntel RTE_PORT_HANDLING, 2602ce8d5614SIntel RTE_PORT_STOPPED) == 0) 2603ce8d5614SIntel printf("Port %d can not be set back " 2604ce8d5614SIntel "to stopped\n", pi); 2605d44f8a48SQi Zhang printf("Fail to configure port %d rx queues\n", 
2606d44f8a48SQi Zhang pi); 2607ce8d5614SIntel /* try to reconfigure queues next time */ 2608ce8d5614SIntel port->need_reconfig_queues = 1; 2609148f963fSBruce Richardson return -1; 2610ce8d5614SIntel } 26111c69df45SOri Kam /* setup hairpin queues */ 261201817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 26131c69df45SOri Kam return -1; 2614ce8d5614SIntel } 2615b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 2616b0a9354aSPavan Nikhilesh if (clear_ptypes) { 2617b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 2618b0a9354aSPavan Nikhilesh NULL, 0); 2619b0a9354aSPavan Nikhilesh if (diag < 0) 2620b0a9354aSPavan Nikhilesh printf( 2621b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 2622b0a9354aSPavan Nikhilesh pi); 2623b0a9354aSPavan Nikhilesh } 2624b0a9354aSPavan Nikhilesh 262501817b10SBing Zhao p_pi = pi; 262601817b10SBing Zhao cnt_pi++; 262701817b10SBing Zhao 2628ce8d5614SIntel /* start port */ 2629ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 2630ce8d5614SIntel printf("Fail to start port %d\n", pi); 2631ce8d5614SIntel 2632ce8d5614SIntel /* Fail to setup rx queue, return */ 2633ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2634ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2635ce8d5614SIntel printf("Port %d can not be set back to " 2636ce8d5614SIntel "stopped\n", pi); 2637ce8d5614SIntel continue; 2638ce8d5614SIntel } 2639ce8d5614SIntel 2640ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2641ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 2642ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 2643ce8d5614SIntel 2644a5279d25SIgor Romanov if (eth_macaddr_get_print_err(pi, &mac_addr) == 0) 2645d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 26462950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 26472950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 26482950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 2649d8c89163SZijie Pan 2650ce8d5614SIntel /* at least one port started, need checking link status */ 2651ce8d5614SIntel need_check_link_status = 1; 265201817b10SBing Zhao 265301817b10SBing Zhao pl[cfg_pi++] = pi; 2654ce8d5614SIntel } 2655ce8d5614SIntel 265692d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 2657edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 265892d2703eSMichael Qiu else if (need_check_link_status == 0) 2659ce8d5614SIntel printf("Please stop the ports first\n"); 2660ce8d5614SIntel 266101817b10SBing Zhao if (hairpin_mode & 0xf) { 266201817b10SBing Zhao uint16_t i; 266301817b10SBing Zhao int j; 266401817b10SBing Zhao 266501817b10SBing Zhao /* bind all started hairpin ports */ 266601817b10SBing Zhao for (i = 0; i < cfg_pi; i++) { 266701817b10SBing Zhao pi = pl[i]; 266801817b10SBing Zhao /* bind current Tx to all peer Rx */ 266901817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 267001817b10SBing Zhao RTE_MAX_ETHPORTS, 1); 267101817b10SBing Zhao if (peer_pi < 0) 267201817b10SBing Zhao return peer_pi; 267301817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 267401817b10SBing Zhao if (!port_is_started(peer_pl[j])) 267501817b10SBing Zhao continue; 267601817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 267701817b10SBing Zhao if (diag < 0) { 267801817b10SBing Zhao printf("Error during binding hairpin" 267901817b10SBing Zhao " Tx port %u to %u: %s\n", 268001817b10SBing Zhao pi, 
peer_pl[j], 268101817b10SBing Zhao rte_strerror(-diag)); 268201817b10SBing Zhao return -1; 268301817b10SBing Zhao } 268401817b10SBing Zhao } 268501817b10SBing Zhao /* bind all peer Tx to current Rx */ 268601817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 268701817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 268801817b10SBing Zhao if (peer_pi < 0) 268901817b10SBing Zhao return peer_pi; 269001817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 269101817b10SBing Zhao if (!port_is_started(peer_pl[j])) 269201817b10SBing Zhao continue; 269301817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 269401817b10SBing Zhao if (diag < 0) { 269501817b10SBing Zhao printf("Error during binding hairpin" 269601817b10SBing Zhao " Tx port %u to %u: %s\n", 269701817b10SBing Zhao peer_pl[j], pi, 269801817b10SBing Zhao rte_strerror(-diag)); 269901817b10SBing Zhao return -1; 270001817b10SBing Zhao } 270101817b10SBing Zhao } 270201817b10SBing Zhao } 270301817b10SBing Zhao } 270401817b10SBing Zhao 2705ce8d5614SIntel printf("Done\n"); 2706148f963fSBruce Richardson return 0; 2707ce8d5614SIntel } 2708ce8d5614SIntel 2709ce8d5614SIntel void 2710ce8d5614SIntel stop_port(portid_t pid) 2711ce8d5614SIntel { 2712ce8d5614SIntel portid_t pi; 2713ce8d5614SIntel struct rte_port *port; 2714ce8d5614SIntel int need_check_link_status = 0; 271501817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 271601817b10SBing Zhao int peer_pi; 2717ce8d5614SIntel 2718ce8d5614SIntel if (dcb_test) { 2719ce8d5614SIntel dcb_test = 0; 2720ce8d5614SIntel dcb_config = 0; 2721ce8d5614SIntel } 27224468635fSMichael Qiu 27234468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 27244468635fSMichael Qiu return; 27254468635fSMichael Qiu 2726ce8d5614SIntel printf("Stopping ports...\n"); 2727ce8d5614SIntel 27287d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 27294468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2730ce8d5614SIntel continue; 2731ce8d5614SIntel 2732a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 2733a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 2734a8ef3e3aSBernard Iremonger continue; 2735a8ef3e3aSBernard Iremonger } 2736a8ef3e3aSBernard Iremonger 27370e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 27380e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 27390e545d30SBernard Iremonger continue; 27400e545d30SBernard Iremonger } 27410e545d30SBernard Iremonger 2742ce8d5614SIntel port = &ports[pi]; 2743ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 2744ce8d5614SIntel RTE_PORT_HANDLING) == 0) 2745ce8d5614SIntel continue; 2746ce8d5614SIntel 274701817b10SBing Zhao if (hairpin_mode & 0xf) { 274801817b10SBing Zhao int j; 274901817b10SBing Zhao 275001817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 275101817b10SBing Zhao /* unbind all peer Tx from current Rx */ 275201817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 275301817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 275401817b10SBing Zhao if (peer_pi < 0) 275501817b10SBing Zhao continue; 275601817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 275701817b10SBing Zhao if (!port_is_started(peer_pl[j])) 275801817b10SBing Zhao continue; 275901817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 276001817b10SBing Zhao } 276101817b10SBing Zhao } 276201817b10SBing Zhao 27630f93edbfSGregory Etelson if (port->flow_list) 27640f93edbfSGregory Etelson port_flow_flush(pi); 
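		/*
		 * Editorial note (not upstream text): some PMDs do not keep
		 * rte_flow rules across a port stop, hence the flush above.
		 * port_flow_flush() is testpmd's wrapper around the public
		 * API, roughly:
		 *
		 *	struct rte_flow_error error;
		 *	if (rte_flow_flush(port_id, &error) != 0)
		 *		printf("flow flush failed: %s\n",
		 *		       error.message ? error.message : "(none)");
		 */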
27650f93edbfSGregory Etelson 2766e62c5a12SIvan Ilchenko if (rte_eth_dev_stop(pi) != 0) 2767e62c5a12SIvan Ilchenko RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 2768e62c5a12SIvan Ilchenko pi); 2769ce8d5614SIntel 2770ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2771ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2772ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 2773ce8d5614SIntel need_check_link_status = 1; 2774ce8d5614SIntel } 2775bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 2776edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 2777ce8d5614SIntel 2778ce8d5614SIntel printf("Done\n"); 2779ce8d5614SIntel } 2780ce8d5614SIntel 2781ce6959bfSWisam Jaddo static void 27824f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 2783ce6959bfSWisam Jaddo { 27844f1de450SThomas Monjalon portid_t i; 27854f1de450SThomas Monjalon portid_t new_total = 0; 2786ce6959bfSWisam Jaddo 27874f1de450SThomas Monjalon for (i = 0; i < *total; i++) 27884f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 27894f1de450SThomas Monjalon array[new_total] = array[i]; 27904f1de450SThomas Monjalon new_total++; 2791ce6959bfSWisam Jaddo } 27924f1de450SThomas Monjalon *total = new_total; 27934f1de450SThomas Monjalon } 27944f1de450SThomas Monjalon 27954f1de450SThomas Monjalon static void 27964f1de450SThomas Monjalon remove_invalid_ports(void) 27974f1de450SThomas Monjalon { 27984f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 27994f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 28004f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 2801ce6959bfSWisam Jaddo } 2802ce6959bfSWisam Jaddo 2803ce8d5614SIntel void 2804ce8d5614SIntel close_port(portid_t pid) 2805ce8d5614SIntel { 2806ce8d5614SIntel portid_t pi; 2807ce8d5614SIntel struct rte_port *port; 2808ce8d5614SIntel 28094468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 28104468635fSMichael Qiu return; 28114468635fSMichael Qiu 2812ce8d5614SIntel printf("Closing ports...\n"); 2813ce8d5614SIntel 28147d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 28154468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2816ce8d5614SIntel continue; 2817ce8d5614SIntel 2818a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 2819a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 2820a8ef3e3aSBernard Iremonger continue; 2821a8ef3e3aSBernard Iremonger } 2822a8ef3e3aSBernard Iremonger 28230e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 28240e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 28250e545d30SBernard Iremonger continue; 28260e545d30SBernard Iremonger } 28270e545d30SBernard Iremonger 2828ce8d5614SIntel port = &ports[pi]; 2829ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2830d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 2831d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 2832d4e8ad64SMichael Qiu continue; 2833d4e8ad64SMichael Qiu } 2834d4e8ad64SMichael Qiu 2835938a184aSAdrien Mazarguil port_flow_flush(pi); 2836ce8d5614SIntel rte_eth_dev_close(pi); 2837ce8d5614SIntel } 2838ce8d5614SIntel 283985c6571cSThomas Monjalon remove_invalid_ports(); 2840ce8d5614SIntel printf("Done\n"); 2841ce8d5614SIntel } 2842ce8d5614SIntel 2843edab33b1STetsuya Mukawa void 284497f1e196SWei Dai reset_port(portid_t pid) 
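/*
 * Editorial note (not upstream text): rte_eth_dev_reset() below asks the
 * PMD to stop and reset the device, e.g. for a VF after it has observed
 * a PF reset. The port must already be stopped; on success the
 * need_reconfig flags are set so that the next start_port() call
 * reconfigures the port and its queues from scratch.
 */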
284597f1e196SWei Dai {
284697f1e196SWei Dai 	int diag;
284797f1e196SWei Dai 	portid_t pi;
284897f1e196SWei Dai 	struct rte_port *port;
284997f1e196SWei Dai 
285097f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
285197f1e196SWei Dai 		return;
285297f1e196SWei Dai 
28531cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
28541cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
28551cde1b9aSShougang Wang 		printf("Cannot reset port(s); please stop port(s) first.\n");
28561cde1b9aSShougang Wang 		return;
28571cde1b9aSShougang Wang 	}
28581cde1b9aSShougang Wang 
285997f1e196SWei Dai 	printf("Resetting ports...\n");
286097f1e196SWei Dai 
286197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
286297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
286397f1e196SWei Dai 			continue;
286497f1e196SWei Dai 
286597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
286697f1e196SWei Dai 			printf("Please remove port %d from forwarding "
286797f1e196SWei Dai 				"configuration.\n", pi);
286897f1e196SWei Dai 			continue;
286997f1e196SWei Dai 		}
287097f1e196SWei Dai 
287197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
287297f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
287397f1e196SWei Dai 				pi);
287497f1e196SWei Dai 			continue;
287597f1e196SWei Dai 		}
287697f1e196SWei Dai 
287797f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
287897f1e196SWei Dai 		if (diag == 0) {
287997f1e196SWei Dai 			port = &ports[pi];
288097f1e196SWei Dai 			port->need_reconfig = 1;
288197f1e196SWei Dai 			port->need_reconfig_queues = 1;
288297f1e196SWei Dai 		} else {
288397f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
288497f1e196SWei Dai 		}
288597f1e196SWei Dai 	}
288697f1e196SWei Dai 
288797f1e196SWei Dai 	printf("Done\n");
288897f1e196SWei Dai }
288997f1e196SWei Dai 
289097f1e196SWei Dai void
2891edab33b1STetsuya Mukawa attach_port(char *identifier)
2892ce8d5614SIntel {
28934f1ed78eSThomas Monjalon 	portid_t pi;
2894c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2895ce8d5614SIntel 
2896edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2897edab33b1STetsuya Mukawa 
2898edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2899edab33b1STetsuya Mukawa 		printf("No device identifier specified\n");
2900edab33b1STetsuya Mukawa 		return;
2901ce8d5614SIntel 	}
2902ce8d5614SIntel 
290375b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2904c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2905edab33b1STetsuya Mukawa 		return;
2906c9cce428SThomas Monjalon 	}
2907c9cce428SThomas Monjalon 
29084f1ed78eSThomas Monjalon 	/* first attach mode: event */
29094f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
29104f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
29114f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
29124f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
29134f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
29144f1ed78eSThomas Monjalon 				setup_attached_port(pi);
29154f1ed78eSThomas Monjalon 		return;
29164f1ed78eSThomas Monjalon 	}
29174f1ed78eSThomas Monjalon 
29184f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
291986fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
29204f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
292186fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
292286fa5de1SThomas Monjalon 			continue; /* port was already attached before */
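		/*
		 * Editorial note (not upstream text): "identifier" is a
		 * devargs string such as "0000:03:00.0" (PCI) or
		 * "net_tap0,iface=tap0" (vdev); rte_dev_probe() above plugged
		 * the device in and this iterator walks the ethdev ports it
		 * created.
		 */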
2923c9cce428SThomas Monjalon setup_attached_port(pi); 2924c9cce428SThomas Monjalon } 292586fa5de1SThomas Monjalon } 2926c9cce428SThomas Monjalon 2927c9cce428SThomas Monjalon static void 2928c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 2929c9cce428SThomas Monjalon { 2930c9cce428SThomas Monjalon unsigned int socket_id; 293134fc1051SIvan Ilchenko int ret; 2932edab33b1STetsuya Mukawa 2933931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 293429841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 2935931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 293629841336SPhil Yang socket_id = socket_ids[0]; 2937931126baSBernard Iremonger reconfig(pi, socket_id); 293834fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 293934fc1051SIvan Ilchenko if (ret != 0) 294034fc1051SIvan Ilchenko printf("Error during enabling promiscuous mode for port %u: %s - ignore\n", 294134fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 2942edab33b1STetsuya Mukawa 29434f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 29444f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 29454f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 29464f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 2947edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 2948edab33b1STetsuya Mukawa 2949edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2950edab33b1STetsuya Mukawa printf("Done\n"); 2951edab33b1STetsuya Mukawa } 2952edab33b1STetsuya Mukawa 29530654d4a8SThomas Monjalon static void 29540654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 29555f4ec54fSChen Jing D(Mark) { 2956f8e5baa2SThomas Monjalon portid_t sibling; 2957f8e5baa2SThomas Monjalon 2958f8e5baa2SThomas Monjalon if (dev == NULL) { 2959f8e5baa2SThomas Monjalon printf("Device already removed\n"); 2960f8e5baa2SThomas Monjalon return; 2961f8e5baa2SThomas Monjalon } 2962f8e5baa2SThomas Monjalon 29630654d4a8SThomas Monjalon printf("Removing a device...\n"); 2964938a184aSAdrien Mazarguil 29652a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 29662a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 29672a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 29682a449871SThomas Monjalon printf("Port %u not stopped\n", sibling); 29692a449871SThomas Monjalon return; 29702a449871SThomas Monjalon } 29712a449871SThomas Monjalon port_flow_flush(sibling); 29722a449871SThomas Monjalon } 29732a449871SThomas Monjalon } 29742a449871SThomas Monjalon 297575b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 2976f8e5baa2SThomas Monjalon TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); 2977edab33b1STetsuya Mukawa return; 29783070419eSGaetan Rivet } 29794f1de450SThomas Monjalon remove_invalid_ports(); 298003ce2c53SMatan Azrad 29810654d4a8SThomas Monjalon printf("Device is detached\n"); 2982f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 2983edab33b1STetsuya Mukawa printf("Done\n"); 2984edab33b1STetsuya Mukawa return; 29855f4ec54fSChen Jing D(Mark) } 29865f4ec54fSChen Jing D(Mark) 2987af75078fSIntel void 29880654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 29890654d4a8SThomas Monjalon { 29900654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 29910654d4a8SThomas Monjalon return; 29920654d4a8SThomas Monjalon 29930654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 29940654d4a8SThomas Monjalon if 
(ports[port_id].port_status != RTE_PORT_STOPPED) { 29950654d4a8SThomas Monjalon printf("Port not stopped\n"); 29960654d4a8SThomas Monjalon return; 29970654d4a8SThomas Monjalon } 29980654d4a8SThomas Monjalon printf("Port was not closed\n"); 29990654d4a8SThomas Monjalon } 30000654d4a8SThomas Monjalon 30010654d4a8SThomas Monjalon detach_device(rte_eth_devices[port_id].device); 30020654d4a8SThomas Monjalon } 30030654d4a8SThomas Monjalon 30040654d4a8SThomas Monjalon void 30055edee5f6SThomas Monjalon detach_devargs(char *identifier) 300655e51c96SNithin Dabilpuram { 300755e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 300855e51c96SNithin Dabilpuram struct rte_devargs da; 300955e51c96SNithin Dabilpuram portid_t port_id; 301055e51c96SNithin Dabilpuram 301155e51c96SNithin Dabilpuram printf("Removing a device...\n"); 301255e51c96SNithin Dabilpuram 301355e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 301455e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 301555e51c96SNithin Dabilpuram printf("cannot parse identifier\n"); 301655e51c96SNithin Dabilpuram if (da.args) 301755e51c96SNithin Dabilpuram free(da.args); 301855e51c96SNithin Dabilpuram return; 301955e51c96SNithin Dabilpuram } 302055e51c96SNithin Dabilpuram 302155e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 302255e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 302355e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 302455e51c96SNithin Dabilpuram printf("Port %u not stopped\n", port_id); 3025149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 302655e51c96SNithin Dabilpuram return; 302755e51c96SNithin Dabilpuram } 302855e51c96SNithin Dabilpuram port_flow_flush(port_id); 302955e51c96SNithin Dabilpuram } 303055e51c96SNithin Dabilpuram } 303155e51c96SNithin Dabilpuram 303255e51c96SNithin Dabilpuram if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { 303355e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 303455e51c96SNithin Dabilpuram da.name, da.bus->name); 303555e51c96SNithin Dabilpuram return; 303655e51c96SNithin Dabilpuram } 303755e51c96SNithin Dabilpuram 303855e51c96SNithin Dabilpuram remove_invalid_ports(); 303955e51c96SNithin Dabilpuram 304055e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 304155e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 304255e51c96SNithin Dabilpuram printf("Done\n"); 304355e51c96SNithin Dabilpuram } 304455e51c96SNithin Dabilpuram 304555e51c96SNithin Dabilpuram void 3046af75078fSIntel pmd_test_exit(void) 3047af75078fSIntel { 3048af75078fSIntel portid_t pt_id; 304926cbb419SViacheslav Ovsiienko unsigned int i; 3050fb73e096SJeff Guo int ret; 3051af75078fSIntel 30528210ec25SPablo de Lara if (test_done == 0) 30538210ec25SPablo de Lara stop_packet_forwarding(); 30548210ec25SPablo de Lara 305526cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 30563a0968c8SShahaf Shuler if (mempools[i]) { 30573a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 30583a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 30593a0968c8SShahaf Shuler NULL); 30603a0968c8SShahaf Shuler } 30613a0968c8SShahaf Shuler } 3062d3a274ceSZhihong Wang if (ports != NULL) { 3063d3a274ceSZhihong Wang no_link_check = 1; 30647d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 306508fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3066af75078fSIntel fflush(stdout); 
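			/*
			 * Editorial note (not upstream text): no_link_check
			 * was set above, so stop_port() skips the up-to-9s
			 * link-status poll during teardown.
			 */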
3067d3a274ceSZhihong Wang stop_port(pt_id); 306808fd782bSCristian Dumitrescu } 306908fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 307008fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 307108fd782bSCristian Dumitrescu fflush(stdout); 3072d3a274ceSZhihong Wang close_port(pt_id); 3073af75078fSIntel } 3074d3a274ceSZhihong Wang } 3075fb73e096SJeff Guo 3076fb73e096SJeff Guo if (hot_plug) { 3077fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 30782049c511SJeff Guo if (ret) { 3079fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3080fb73e096SJeff Guo "fail to stop device event monitor."); 30812049c511SJeff Guo return; 30822049c511SJeff Guo } 3083fb73e096SJeff Guo 30842049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3085cc1bf307SJeff Guo dev_event_callback, NULL); 30862049c511SJeff Guo if (ret < 0) { 3087fb73e096SJeff Guo RTE_LOG(ERR, EAL, 30882049c511SJeff Guo "fail to unregister device event callback.\n"); 30892049c511SJeff Guo return; 30902049c511SJeff Guo } 30912049c511SJeff Guo 30922049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 30932049c511SJeff Guo if (ret) { 30942049c511SJeff Guo RTE_LOG(ERR, EAL, 30952049c511SJeff Guo "fail to disable hotplug handling.\n"); 30962049c511SJeff Guo return; 30972049c511SJeff Guo } 3098fb73e096SJeff Guo } 309926cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3100401b744dSShahaf Shuler if (mempools[i]) 3101401b744dSShahaf Shuler rte_mempool_free(mempools[i]); 3102401b744dSShahaf Shuler } 3103fb73e096SJeff Guo 3104d3a274ceSZhihong Wang printf("\nBye...\n"); 3105af75078fSIntel } 3106af75078fSIntel 3107af75078fSIntel typedef void (*cmd_func_t)(void); 3108af75078fSIntel struct pmd_test_command { 3109af75078fSIntel const char *cmd_name; 3110af75078fSIntel cmd_func_t cmd_func; 3111af75078fSIntel }; 3112af75078fSIntel 3113ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3114af75078fSIntel static void 3115edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3116af75078fSIntel { 3117ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3118ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3119f8244c63SZhiyong Yang portid_t portid; 3120f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3121ce8d5614SIntel struct rte_eth_link link; 3122e661a08bSIgor Romanov int ret; 3123ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3124ce8d5614SIntel 3125ce8d5614SIntel printf("Checking link statuses...\n"); 3126ce8d5614SIntel fflush(stdout); 3127ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3128ce8d5614SIntel all_ports_up = 1; 31297d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3130ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3131ce8d5614SIntel continue; 3132ce8d5614SIntel memset(&link, 0, sizeof(link)); 3133e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3134e661a08bSIgor Romanov if (ret < 0) { 3135e661a08bSIgor Romanov all_ports_up = 0; 3136e661a08bSIgor Romanov if (print_flag == 1) 3137e661a08bSIgor Romanov printf("Port %u link get failed: %s\n", 3138e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3139e661a08bSIgor Romanov continue; 3140e661a08bSIgor Romanov } 3141ce8d5614SIntel /* print link status if flag set */ 3142ce8d5614SIntel if (print_flag == 1) { 3143ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3144ba5509a6SIvan Dyukov sizeof(link_status), &link); 3145ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 
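				/*
				 * Editorial note (not upstream text):
				 * rte_eth_link_to_str() renders the status
				 * into the caller's buffer, e.g.
				 * "Link up at 25 Gbps FDX Autoneg" or
				 * "Link down".
				 */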
3146ce8d5614SIntel continue; 3147ce8d5614SIntel } 3148ce8d5614SIntel /* clear all_ports_up flag if any link down */ 314909419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 3150ce8d5614SIntel all_ports_up = 0; 3151ce8d5614SIntel break; 3152ce8d5614SIntel } 3153ce8d5614SIntel } 3154ce8d5614SIntel /* after finally printing all link status, get out */ 3155ce8d5614SIntel if (print_flag == 1) 3156ce8d5614SIntel break; 3157ce8d5614SIntel 3158ce8d5614SIntel if (all_ports_up == 0) { 3159ce8d5614SIntel fflush(stdout); 3160ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 3161ce8d5614SIntel } 3162ce8d5614SIntel 3163ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3164ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3165ce8d5614SIntel print_flag = 1; 3166ce8d5614SIntel } 31678ea656f8SGaetan Rivet 31688ea656f8SGaetan Rivet if (lsc_interrupt) 31698ea656f8SGaetan Rivet break; 3170ce8d5614SIntel } 3171af75078fSIntel } 3172af75078fSIntel 3173284c908cSGaetan Rivet static void 3174cc1bf307SJeff Guo rmv_port_callback(void *arg) 3175284c908cSGaetan Rivet { 31763b97888aSMatan Azrad int need_to_start = 0; 31770da2a62bSMatan Azrad int org_no_link_check = no_link_check; 317828caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 31790654d4a8SThomas Monjalon struct rte_device *dev; 3180284c908cSGaetan Rivet 3181284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3182284c908cSGaetan Rivet 31833b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 31843b97888aSMatan Azrad need_to_start = 1; 31853b97888aSMatan Azrad stop_packet_forwarding(); 31863b97888aSMatan Azrad } 31870da2a62bSMatan Azrad no_link_check = 1; 3188284c908cSGaetan Rivet stop_port(port_id); 31890da2a62bSMatan Azrad no_link_check = org_no_link_check; 31900654d4a8SThomas Monjalon 31910654d4a8SThomas Monjalon /* Save rte_device pointer before closing ethdev port */ 31920654d4a8SThomas Monjalon dev = rte_eth_devices[port_id].device; 3193284c908cSGaetan Rivet close_port(port_id); 31940654d4a8SThomas Monjalon detach_device(dev); /* might be already removed or have more ports */ 31950654d4a8SThomas Monjalon 31963b97888aSMatan Azrad if (need_to_start) 31973b97888aSMatan Azrad start_packet_forwarding(0); 3198284c908cSGaetan Rivet } 3199284c908cSGaetan Rivet 320076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3201d6af1a13SBernard Iremonger static int 3202f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3203d6af1a13SBernard Iremonger void *ret_param) 320476ad4a2dSGaetan Rivet { 320576ad4a2dSGaetan Rivet RTE_SET_USED(param); 3206d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 320776ad4a2dSGaetan Rivet 320876ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 3209f431e010SHerakliusz Lipiec fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 321076ad4a2dSGaetan Rivet port_id, __func__, type); 321176ad4a2dSGaetan Rivet fflush(stderr); 32123af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3213f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 321497b5d8b5SThomas Monjalon eth_event_desc[type]); 321576ad4a2dSGaetan Rivet fflush(stdout); 321676ad4a2dSGaetan Rivet } 3217284c908cSGaetan Rivet 3218284c908cSGaetan Rivet switch (type) { 32194f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 32204f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 32214f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 32224f1ed78eSThomas 
Monjalon break; 3223284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 32244f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 32254f1ed78eSThomas Monjalon break; 3226284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3227cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 3228284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 3229284c908cSGaetan Rivet break; 323085c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 323185c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 323285c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 323385c6571cSThomas Monjalon break; 3234284c908cSGaetan Rivet default: 3235284c908cSGaetan Rivet break; 3236284c908cSGaetan Rivet } 3237d6af1a13SBernard Iremonger return 0; 323876ad4a2dSGaetan Rivet } 323976ad4a2dSGaetan Rivet 324097b5d8b5SThomas Monjalon static int 324197b5d8b5SThomas Monjalon register_eth_event_callback(void) 324297b5d8b5SThomas Monjalon { 324397b5d8b5SThomas Monjalon int ret; 324497b5d8b5SThomas Monjalon enum rte_eth_event_type event; 324597b5d8b5SThomas Monjalon 324697b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 324797b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 324897b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 324997b5d8b5SThomas Monjalon event, 325097b5d8b5SThomas Monjalon eth_event_callback, 325197b5d8b5SThomas Monjalon NULL); 325297b5d8b5SThomas Monjalon if (ret != 0) { 325397b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 325497b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 325597b5d8b5SThomas Monjalon return -1; 325697b5d8b5SThomas Monjalon } 325797b5d8b5SThomas Monjalon } 325897b5d8b5SThomas Monjalon 325997b5d8b5SThomas Monjalon return 0; 326097b5d8b5SThomas Monjalon } 326197b5d8b5SThomas Monjalon 3262fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3263fb73e096SJeff Guo static void 3264cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3265fb73e096SJeff Guo __rte_unused void *arg) 3266fb73e096SJeff Guo { 32672049c511SJeff Guo uint16_t port_id; 32682049c511SJeff Guo int ret; 32692049c511SJeff Guo 3270fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3271fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3272fb73e096SJeff Guo __func__, type); 3273fb73e096SJeff Guo fflush(stderr); 3274fb73e096SJeff Guo } 3275fb73e096SJeff Guo 3276fb73e096SJeff Guo switch (type) { 3277fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3278cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3279fb73e096SJeff Guo device_name); 32802049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 32812049c511SJeff Guo if (ret) { 32822049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 32832049c511SJeff Guo device_name); 32842049c511SJeff Guo return; 32852049c511SJeff Guo } 3286cc1bf307SJeff Guo /* 3287cc1bf307SJeff Guo * Because the user's callback is invoked in eal interrupt 3288cc1bf307SJeff Guo * callback, the interrupt callback need to be finished before 3289cc1bf307SJeff Guo * it can be unregistered when detaching device. So finish 3290cc1bf307SJeff Guo * callback soon and use a deferred removal to detach device 3291cc1bf307SJeff Guo * is need. It is a workaround, once the device detaching be 3292cc1bf307SJeff Guo * moved into the eal in the future, the deferred removal could 3293cc1bf307SJeff Guo * be deleted. 
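	 *
	 * Editorial addition (not upstream text): concretely, the handler
	 * only schedules a one-shot 100 ms EAL alarm,
	 *
	 *	rte_eal_alarm_set(100000, rmv_port_callback,
	 *			  (void *)(intptr_t)port_id);
	 *
	 * and rmv_port_callback() later stops, closes and detaches the port
	 * from the alarm thread instead of from inside this callback.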
3294cc1bf307SJeff Guo */ 3295cc1bf307SJeff Guo if (rte_eal_alarm_set(100000, 3296cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 3297cc1bf307SJeff Guo RTE_LOG(ERR, EAL, 3298cc1bf307SJeff Guo "Could not set up deferred device removal\n"); 3299fb73e096SJeff Guo break; 3300fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 3301fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 3302fb73e096SJeff Guo device_name); 3303fb73e096SJeff Guo /* TODO: After finish kernel driver binding, 3304fb73e096SJeff Guo * begin to attach port. 3305fb73e096SJeff Guo */ 3306fb73e096SJeff Guo break; 3307fb73e096SJeff Guo default: 3308fb73e096SJeff Guo break; 3309fb73e096SJeff Guo } 3310fb73e096SJeff Guo } 3311fb73e096SJeff Guo 3312f2c5125aSPablo de Lara static void 3313f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 3314f2c5125aSPablo de Lara { 3315d44f8a48SQi Zhang uint16_t qid; 33165e91aeefSWei Zhao uint64_t offloads; 3317f2c5125aSPablo de Lara 3318d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 33195e91aeefSWei Zhao offloads = port->rx_conf[qid].offloads; 3320d44f8a48SQi Zhang port->rx_conf[qid] = port->dev_info.default_rxconf; 3321575e0fd1SWei Zhao if (offloads != 0) 3322575e0fd1SWei Zhao port->rx_conf[qid].offloads = offloads; 3323d44f8a48SQi Zhang 3324d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 3325f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 3326d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 3327f2c5125aSPablo de Lara 3328f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 3329d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 3330f2c5125aSPablo de Lara 3331f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 3332d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 3333f2c5125aSPablo de Lara 3334f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 3335d44f8a48SQi Zhang port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 3336f2c5125aSPablo de Lara 3337f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 3338d44f8a48SQi Zhang port->rx_conf[qid].rx_drop_en = rx_drop_en; 3339f2c5125aSPablo de Lara 3340d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 3341d44f8a48SQi Zhang } 3342d44f8a48SQi Zhang 3343d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 33445e91aeefSWei Zhao offloads = port->tx_conf[qid].offloads; 3345d44f8a48SQi Zhang port->tx_conf[qid] = port->dev_info.default_txconf; 3346575e0fd1SWei Zhao if (offloads != 0) 3347575e0fd1SWei Zhao port->tx_conf[qid].offloads = offloads; 3348d44f8a48SQi Zhang 3349d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 3350f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 3351d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 3352f2c5125aSPablo de Lara 3353f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 3354d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 3355f2c5125aSPablo de Lara 3356f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 3357d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 3358f2c5125aSPablo de Lara 3359f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 3360d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 3361f2c5125aSPablo de Lara 3362f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 3363d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 3364d44f8a48SQi Zhang 3365d44f8a48SQi Zhang 
port->nb_tx_desc[qid] = nb_txd; 3366d44f8a48SQi Zhang } 3367f2c5125aSPablo de Lara } 3368f2c5125aSPablo de Lara 33690c4abd36SSteve Yang /* 33700c4abd36SSteve Yang * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload, 33710c4abd36SSteve Yang * MTU is also aligned if JUMBO_FRAME offload is not set. 33720c4abd36SSteve Yang * 33730c4abd36SSteve Yang * port->dev_info should be set before calling this function. 33740c4abd36SSteve Yang * 33750c4abd36SSteve Yang * return 0 on success, negative on error 33760c4abd36SSteve Yang */ 33770c4abd36SSteve Yang int 33780c4abd36SSteve Yang update_jumbo_frame_offload(portid_t portid) 33790c4abd36SSteve Yang { 33800c4abd36SSteve Yang struct rte_port *port = &ports[portid]; 33810c4abd36SSteve Yang uint32_t eth_overhead; 33820c4abd36SSteve Yang uint64_t rx_offloads; 33830c4abd36SSteve Yang int ret; 33840c4abd36SSteve Yang bool on; 33850c4abd36SSteve Yang 33860c4abd36SSteve Yang /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */ 33870c4abd36SSteve Yang if (port->dev_info.max_mtu != UINT16_MAX && 33880c4abd36SSteve Yang port->dev_info.max_rx_pktlen > port->dev_info.max_mtu) 33890c4abd36SSteve Yang eth_overhead = port->dev_info.max_rx_pktlen - 33900c4abd36SSteve Yang port->dev_info.max_mtu; 33910c4abd36SSteve Yang else 33920c4abd36SSteve Yang eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 33930c4abd36SSteve Yang 33940c4abd36SSteve Yang rx_offloads = port->dev_conf.rxmode.offloads; 33950c4abd36SSteve Yang 33960c4abd36SSteve Yang /* Default config value is 0 to use PMD specific overhead */ 33970c4abd36SSteve Yang if (port->dev_conf.rxmode.max_rx_pkt_len == 0) 33980c4abd36SSteve Yang port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead; 33990c4abd36SSteve Yang 34000c4abd36SSteve Yang if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) { 34010c4abd36SSteve Yang rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; 34020c4abd36SSteve Yang on = false; 34030c4abd36SSteve Yang } else { 34040c4abd36SSteve Yang if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { 34050c4abd36SSteve Yang printf("Frame size (%u) is not supported by port %u\n", 34060c4abd36SSteve Yang port->dev_conf.rxmode.max_rx_pkt_len, 34070c4abd36SSteve Yang portid); 34080c4abd36SSteve Yang return -1; 34090c4abd36SSteve Yang } 34100c4abd36SSteve Yang rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; 34110c4abd36SSteve Yang on = true; 34120c4abd36SSteve Yang } 34130c4abd36SSteve Yang 34140c4abd36SSteve Yang if (rx_offloads != port->dev_conf.rxmode.offloads) { 34150c4abd36SSteve Yang uint16_t qid; 34160c4abd36SSteve Yang 34170c4abd36SSteve Yang port->dev_conf.rxmode.offloads = rx_offloads; 34180c4abd36SSteve Yang 34190c4abd36SSteve Yang /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */ 34200c4abd36SSteve Yang for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) { 34210c4abd36SSteve Yang if (on) 34220c4abd36SSteve Yang port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; 34230c4abd36SSteve Yang else 34240c4abd36SSteve Yang port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; 34250c4abd36SSteve Yang } 34260c4abd36SSteve Yang } 34270c4abd36SSteve Yang 34280c4abd36SSteve Yang /* If JUMBO_FRAME is set MTU conversion done by ethdev layer, 34290c4abd36SSteve Yang * if unset do it here 34300c4abd36SSteve Yang */ 34310c4abd36SSteve Yang if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { 34320c4abd36SSteve Yang ret = rte_eth_dev_set_mtu(portid, 34330c4abd36SSteve Yang port->dev_conf.rxmode.max_rx_pkt_len - 
eth_overhead); 34340c4abd36SSteve Yang if (ret) 34350c4abd36SSteve Yang printf("Failed to set MTU to %u for port %u\n", 34360c4abd36SSteve Yang port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead, 34370c4abd36SSteve Yang portid); 34380c4abd36SSteve Yang } 34390c4abd36SSteve Yang 34400c4abd36SSteve Yang return 0; 34410c4abd36SSteve Yang } 34420c4abd36SSteve Yang 3443013af9b6SIntel void 3444013af9b6SIntel init_port_config(void) 3445013af9b6SIntel { 3446013af9b6SIntel portid_t pid; 3447013af9b6SIntel struct rte_port *port; 34486f51deb9SIvan Ilchenko int ret; 3449013af9b6SIntel 34507d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 3451013af9b6SIntel port = &ports[pid]; 3452013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 34536f51deb9SIvan Ilchenko 34546f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 34556f51deb9SIvan Ilchenko if (ret != 0) 34566f51deb9SIvan Ilchenko return; 34576f51deb9SIvan Ilchenko 34583ce690d3SBruce Richardson if (nb_rxq > 1) { 3459013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 346090892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 3461422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 3462af75078fSIntel } else { 3463013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 3464013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 3465af75078fSIntel } 34663ce690d3SBruce Richardson 34675f592039SJingjing Wu if (port->dcb_flag == 0) { 34683ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 3469f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 3470f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3471f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_RSS); 34723ce690d3SBruce Richardson else 34733ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 34743ce690d3SBruce Richardson } 34753ce690d3SBruce Richardson 3476f2c5125aSPablo de Lara rxtx_port_config(port); 3477013af9b6SIntel 3478a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 3479a5279d25SIgor Romanov if (ret != 0) 3480a5279d25SIgor Romanov return; 3481013af9b6SIntel 3482a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS 3483e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 34847b7e5ba7SIntel #endif 34858ea656f8SGaetan Rivet 34868ea656f8SGaetan Rivet if (lsc_interrupt && 34878ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 34888ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 34898ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 3490284c908cSGaetan Rivet if (rmv_interrupt && 3491284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 3492284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 3493284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 3494013af9b6SIntel } 3495013af9b6SIntel } 3496013af9b6SIntel 349741b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 349841b05095SBernard Iremonger { 349941b05095SBernard Iremonger struct rte_port *port; 350041b05095SBernard Iremonger 350141b05095SBernard Iremonger port = &ports[slave_pid]; 350241b05095SBernard Iremonger port->slave_flag = 1; 350341b05095SBernard Iremonger } 350441b05095SBernard Iremonger 350541b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 350641b05095SBernard Iremonger { 350741b05095SBernard Iremonger struct rte_port *port; 350841b05095SBernard Iremonger 350941b05095SBernard Iremonger port = &ports[slave_pid]; 351041b05095SBernard Iremonger port->slave_flag = 0; 351141b05095SBernard Iremonger } 
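/*
 * Editorial note (not upstream text): slave_flag is testpmd-local
 * bookkeeping; port_is_bonding_slave() below additionally honours the
 * ethdev-level RTE_ETH_DEV_BONDED_SLAVE flag, so ports enslaved outside
 * of testpmd (e.g. through rte_eth_bond_slave_add()) are detected too.
 */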
351241b05095SBernard Iremonger 35130e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 35140e545d30SBernard Iremonger { 35150e545d30SBernard Iremonger struct rte_port *port; 35160e545d30SBernard Iremonger 35170e545d30SBernard Iremonger port = &ports[slave_pid]; 3518b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 3519b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 3520b8b8b344SMatan Azrad return 1; 3521b8b8b344SMatan Azrad return 0; 35220e545d30SBernard Iremonger } 35230e545d30SBernard Iremonger 3524013af9b6SIntel const uint16_t vlan_tags[] = { 3525013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 3526013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 3527013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 3528013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 3529013af9b6SIntel }; 3530013af9b6SIntel 3531013af9b6SIntel static int 3532ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 35331a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 35341a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 35351a572499SJingjing Wu uint8_t pfc_en) 3536013af9b6SIntel { 3537013af9b6SIntel uint8_t i; 3538ac7c491cSKonstantin Ananyev int32_t rc; 3539ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 3540af75078fSIntel 3541af75078fSIntel /* 3542013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 3543013af9b6SIntel * given above, and the number of traffic classes available for use. 3544af75078fSIntel */ 35451a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 35461a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 35471a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 35481a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 35491a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 3550013af9b6SIntel 3551547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 35521a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 35531a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 35541a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 35551a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 35561a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 35571a572499SJingjing Wu (num_tcs == ETH_4_TCS ? 
ETH_32_POOLS : ETH_16_POOLS); 3558013af9b6SIntel 35591a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 35601a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 35611a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 35621a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 35631a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 3564af75078fSIntel } 3565013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3566f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 3567f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 3568013af9b6SIntel } 3569013af9b6SIntel 3570013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 3571f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 3572f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3573f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB); 357432e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 35751a572499SJingjing Wu } else { 35761a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 35771a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 35781a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 35791a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 3580013af9b6SIntel 35815139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 35825139bc12STing Xu 3583ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 3584ac7c491cSKonstantin Ananyev if (rc != 0) 3585ac7c491cSKonstantin Ananyev return rc; 3586ac7c491cSKonstantin Ananyev 35871a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 35881a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 35891a572499SJingjing Wu 3590bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3591bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 3592bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 3593013af9b6SIntel } 3594ac7c491cSKonstantin Ananyev 3595f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 3596f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3597f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_DCB_RSS); 3598ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 359932e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 36001a572499SJingjing Wu } 36011a572499SJingjing Wu 36021a572499SJingjing Wu if (pfc_en) 36031a572499SJingjing Wu eth_conf->dcb_capability_en = 36041a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 3605013af9b6SIntel else 3606013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 3607013af9b6SIntel 3608013af9b6SIntel return 0; 3609013af9b6SIntel } 3610013af9b6SIntel 3611013af9b6SIntel int 36121a572499SJingjing Wu init_port_dcb_config(portid_t pid, 36131a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 36141a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 36151a572499SJingjing Wu uint8_t pfc_en) 3616013af9b6SIntel { 3617013af9b6SIntel struct rte_eth_conf port_conf; 3618013af9b6SIntel struct rte_port *rte_port; 3619013af9b6SIntel int retval; 3620013af9b6SIntel uint16_t i; 3621013af9b6SIntel 36222a977b89SWenzhuo Lu rte_port = &ports[pid]; 3623013af9b6SIntel 3624013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 3625013af9b6SIntel /* Enter DCB configuration status */ 3626013af9b6SIntel dcb_config = 1; 3627013af9b6SIntel 3628d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 3629d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 3630d5354e89SYanglong Wu 3631013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 
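	/*
	 * Editorial note (not upstream text): get_eth_dcb_conf() fills in
	 * either a VMDQ+DCB configuration (one pool per entry of vlan_tags[])
	 * or a plain DCB(+RSS) one, depending on dcb_mode; only the resulting
	 * rte_eth_conf is consumed below.
	 */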
3632ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 3633013af9b6SIntel if (retval < 0) 3634013af9b6SIntel return retval; 36350074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3636013af9b6SIntel 36372f203d44SQi Zhang /* re-configure the device . */ 36382b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 36392b0e0ebaSChenbo Xia if (retval < 0) 36402b0e0ebaSChenbo Xia return retval; 36416f51deb9SIvan Ilchenko 36426f51deb9SIvan Ilchenko retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); 36436f51deb9SIvan Ilchenko if (retval != 0) 36446f51deb9SIvan Ilchenko return retval; 36452a977b89SWenzhuo Lu 36462a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 36472a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 36482a977b89SWenzhuo Lu */ 36492a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 36502a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 36512a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 36522a977b89SWenzhuo Lu " for port %d.", pid); 36532a977b89SWenzhuo Lu return -1; 36542a977b89SWenzhuo Lu } 36552a977b89SWenzhuo Lu 36562a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 36572a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 36582a977b89SWenzhuo Lu */ 36592a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 366086ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 366186ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 366286ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 366386ef65eeSBernard Iremonger } else { 36642a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 36652a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 366686ef65eeSBernard Iremonger } 36672a977b89SWenzhuo Lu } else { 36682a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 36692a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 36702a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 36712a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 36722a977b89SWenzhuo Lu } else { 36732a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 36742a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 36752a977b89SWenzhuo Lu 36762a977b89SWenzhuo Lu } 36772a977b89SWenzhuo Lu } 36782a977b89SWenzhuo Lu rx_free_thresh = 64; 36792a977b89SWenzhuo Lu 3680013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 3681013af9b6SIntel 3682f2c5125aSPablo de Lara rxtx_port_config(rte_port); 3683013af9b6SIntel /* VLAN filter */ 36840074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 36851a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 3686013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 3687013af9b6SIntel 3688a5279d25SIgor Romanov retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); 3689a5279d25SIgor Romanov if (retval != 0) 3690a5279d25SIgor Romanov return retval; 3691a5279d25SIgor Romanov 36927741e4cfSIntel rte_port->dcb_flag = 1; 36937741e4cfSIntel 3694013af9b6SIntel return 0; 3695af75078fSIntel } 3696af75078fSIntel 3697ffc468ffSTetsuya Mukawa static void 3698ffc468ffSTetsuya Mukawa init_port(void) 3699ffc468ffSTetsuya Mukawa { 37001b9f2746SGregory Etelson int i; 37011b9f2746SGregory Etelson 3702ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
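 * Editorial note (not upstream text): the array is sized for
 * RTE_MAX_ETHPORTS entries regardless of how many ports were probed, so
 * later hot-plugged ports land in pre-zeroed slots; rte_zmalloc()
 * returns zeroed, cache-line-aligned memory.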
3697ffc468ffSTetsuya Mukawa static void
3698ffc468ffSTetsuya Mukawa init_port(void)
3699ffc468ffSTetsuya Mukawa {
37001b9f2746SGregory Etelson     int i;
37011b9f2746SGregory Etelson
3702ffc468ffSTetsuya Mukawa     /* Configuration of Ethernet ports. */
3703ffc468ffSTetsuya Mukawa     ports = rte_zmalloc("testpmd: ports",
3704ffc468ffSTetsuya Mukawa                         sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3705ffc468ffSTetsuya Mukawa                         RTE_CACHE_LINE_SIZE);
3706ffc468ffSTetsuya Mukawa     if (ports == NULL) {
3707ffc468ffSTetsuya Mukawa         rte_exit(EXIT_FAILURE,
3708ffc468ffSTetsuya Mukawa                  "rte_zmalloc(%d struct rte_port) failed\n",
3709ffc468ffSTetsuya Mukawa                  RTE_MAX_ETHPORTS);
3710ffc468ffSTetsuya Mukawa     }
37111b9f2746SGregory Etelson     for (i = 0; i < RTE_MAX_ETHPORTS; i++)
37121b9f2746SGregory Etelson         LIST_INIT(&ports[i].flow_tunnel_list);
371329841336SPhil Yang     /* Initialize ports NUMA structures */
371429841336SPhil Yang     memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
371529841336SPhil Yang     memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
371629841336SPhil Yang     memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3717ffc468ffSTetsuya Mukawa }
3718ffc468ffSTetsuya Mukawa
3719d3a274ceSZhihong Wang static void
3720d3a274ceSZhihong Wang force_quit(void)
3721d3a274ceSZhihong Wang {
3722d3a274ceSZhihong Wang     pmd_test_exit();
3723d3a274ceSZhihong Wang     prompt_exit();
3724d3a274ceSZhihong Wang }
3725d3a274ceSZhihong Wang
3726d3a274ceSZhihong Wang static void
3727cfea1f30SPablo de Lara print_stats(void)
3728cfea1f30SPablo de Lara {
3729cfea1f30SPablo de Lara     uint8_t i;
3730cfea1f30SPablo de Lara     const char clr[] = { 27, '[', '2', 'J', '\0' };
3731cfea1f30SPablo de Lara     const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3732cfea1f30SPablo de Lara
3733cfea1f30SPablo de Lara     /* Clear screen and move to top left */
3734cfea1f30SPablo de Lara     printf("%s%s", clr, top_left);
3735cfea1f30SPablo de Lara
3736cfea1f30SPablo de Lara     printf("\nPort statistics ====================================");
3737cfea1f30SPablo de Lara     for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3738cfea1f30SPablo de Lara         nic_stats_display(fwd_ports_ids[i]);
3739683d1e82SIgor Romanov
3740683d1e82SIgor Romanov     fflush(stdout);
3741cfea1f30SPablo de Lara }
3742cfea1f30SPablo de Lara
3743cfea1f30SPablo de Lara static void
3744d3a274ceSZhihong Wang signal_handler(int signum)
3745d3a274ceSZhihong Wang {
3746d3a274ceSZhihong Wang     if (signum == SIGINT || signum == SIGTERM) {
3747d3a274ceSZhihong Wang         printf("\nSignal %d received, preparing to exit...\n",
3748d3a274ceSZhihong Wang                signum);
3749a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
3750102b7329SReshma Pattan         /* uninitialize packet capture framework */
3751102b7329SReshma Pattan         rte_pdump_uninit();
3752102b7329SReshma Pattan #endif
3753a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
37548b36297dSAmit Gupta         if (latencystats_enabled != 0)
375562d3216dSReshma Pattan             rte_latencystats_uninit();
375662d3216dSReshma Pattan #endif
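/*
 * Editor's note: on SIGINT/SIGTERM the handler first tears down the optional
 * capture and latency-stats state above, then force_quit() stops and closes
 * the ports, f_quit tells the statistics loop in main() to stop, and finally
 * the signal is re-raised with the default disposition so that the process
 * exits with the conventional termination status for that signal.
 */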
3757d3a274ceSZhihong Wang         force_quit();
3758d9a191a0SPhil Yang         /* Set flag to indicate the forced termination. */
3759d9a191a0SPhil Yang         f_quit = 1;
3760d3a274ceSZhihong Wang         /* exit with the expected status */
3761d3a274ceSZhihong Wang         signal(signum, SIG_DFL);
3762d3a274ceSZhihong Wang         kill(getpid(), signum);
3763d3a274ceSZhihong Wang     }
3764d3a274ceSZhihong Wang }
3765d3a274ceSZhihong Wang
3766af75078fSIntel int
3767af75078fSIntel main(int argc, char** argv)
3768af75078fSIntel {
3769af75078fSIntel     int diag;
3770f8244c63SZhiyong Yang     portid_t port_id;
37714918a357SXiaoyun Li     uint16_t count;
3772fb73e096SJeff Guo     int ret;
3773af75078fSIntel
3774d3a274ceSZhihong Wang     signal(SIGINT, signal_handler);
3775d3a274ceSZhihong Wang     signal(SIGTERM, signal_handler);
3776d3a274ceSZhihong Wang
3777285fd101SOlivier Matz     testpmd_logtype = rte_log_register("testpmd");
3778285fd101SOlivier Matz     if (testpmd_logtype < 0)
377916267ceeSStephen Hemminger         rte_exit(EXIT_FAILURE, "Cannot register log type\n");
3780285fd101SOlivier Matz     rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3781285fd101SOlivier Matz
37829201806eSStephen Hemminger     diag = rte_eal_init(argc, argv);
37839201806eSStephen Hemminger     if (diag < 0)
378416267ceeSStephen Hemminger         rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
378516267ceeSStephen Hemminger                  rte_strerror(rte_errno));
37869201806eSStephen Hemminger
3787a87ab9f7SStephen Hemminger     if (rte_eal_process_type() == RTE_PROC_SECONDARY)
378816267ceeSStephen Hemminger         rte_exit(EXIT_FAILURE,
378916267ceeSStephen Hemminger                  "Secondary process type not supported.\n");
3790a87ab9f7SStephen Hemminger
379197b5d8b5SThomas Monjalon     ret = register_eth_event_callback();
379297b5d8b5SThomas Monjalon     if (ret != 0)
379316267ceeSStephen Hemminger         rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
379497b5d8b5SThomas Monjalon
3795a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
37964aa0d012SAnatoly Burakov     /* initialize packet capture framework */
3797e9436f54STiwei Bie     rte_pdump_init();
37984aa0d012SAnatoly Burakov #endif
37994aa0d012SAnatoly Burakov
38004918a357SXiaoyun Li     count = 0;
38014918a357SXiaoyun Li     RTE_ETH_FOREACH_DEV(port_id) {
38024918a357SXiaoyun Li         ports_ids[count] = port_id;
38034918a357SXiaoyun Li         count++;
38044918a357SXiaoyun Li     }
38054918a357SXiaoyun Li     nb_ports = (portid_t) count;
38064aa0d012SAnatoly Burakov     if (nb_ports == 0)
38074aa0d012SAnatoly Burakov         TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
38084aa0d012SAnatoly Burakov
38094aa0d012SAnatoly Burakov     /* allocate port structures, and init them */
38104aa0d012SAnatoly Burakov     init_port();
38114aa0d012SAnatoly Burakov
38124aa0d012SAnatoly Burakov     set_def_fwd_config();
38134aa0d012SAnatoly Burakov     if (nb_lcores == 0)
381416267ceeSStephen Hemminger         rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
381516267ceeSStephen Hemminger                  "Check the core mask argument\n");
38164aa0d012SAnatoly Burakov
3817e505d84cSAnatoly Burakov     /* Bitrate/latency stats disabled by default */
3818a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
3819e505d84cSAnatoly Burakov     bitrate_enabled = 0;
3820e505d84cSAnatoly Burakov #endif
3821a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
3822e505d84cSAnatoly Burakov     latencystats_enabled = 0;
3823e505d84cSAnatoly Burakov #endif
3824e505d84cSAnatoly Burakov
3825fb7b8b32SAnatoly Burakov     /* on FreeBSD, mlockall() is disabled by default */
38265fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3827fb7b8b32SAnatoly Burakov     do_mlockall = 0;
3828fb7b8b32SAnatoly Burakov #else
3829fb7b8b32SAnatoly Burakov     do_mlockall = 1;
3830fb7b8b32SAnatoly Burakov #endif
3831fb7b8b32SAnatoly Burakov
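/*
 * Editor's note: rte_eal_init() returns the number of arguments it consumed,
 * so skipping `diag` entries below leaves only the application arguments
 * (everything after the "--" separator) for launch_args_parse(). A typical
 * invocation splits as:
 *
 *   ./dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *                  \___ EAL ___/    \_ testpmd args _/
 */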
3832e505d84cSAnatoly Burakov     argc -= diag;
3833e505d84cSAnatoly Burakov     argv += diag;
3834e505d84cSAnatoly Burakov     if (argc > 1)
3835e505d84cSAnatoly Burakov         launch_args_parse(argc, argv);
3836e505d84cSAnatoly Burakov
3837e505d84cSAnatoly Burakov     if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3838285fd101SOlivier Matz         TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
38391c036b16SEelco Chaudron                     strerror(errno));
38401c036b16SEelco Chaudron     }
38411c036b16SEelco Chaudron
384299cabef0SPablo de Lara     if (tx_first && interactive)
384399cabef0SPablo de Lara         rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
384499cabef0SPablo de Lara                  "interactive mode.\n");
38458820cba4SDavid Hunt
38468820cba4SDavid Hunt     if (tx_first && lsc_interrupt) {
38478820cba4SDavid Hunt         printf("Warning: lsc_interrupt needs to be off when "
38488820cba4SDavid Hunt                "using tx_first. Disabling.\n");
38498820cba4SDavid Hunt         lsc_interrupt = 0;
38508820cba4SDavid Hunt     }
38518820cba4SDavid Hunt
38525a8fb55cSReshma Pattan     if (!nb_rxq && !nb_txq)
38535a8fb55cSReshma Pattan         printf("Warning: at least one of rx/tx queues should be non-zero\n");
38545a8fb55cSReshma Pattan
38555a8fb55cSReshma Pattan     if (nb_rxq > 1 && nb_rxq > nb_txq)
3856af75078fSIntel         printf("Warning: nb_rxq=%d enables RSS configuration, "
3857af75078fSIntel                "but nb_txq=%d will prevent it from being fully tested.\n",
3858af75078fSIntel                nb_rxq, nb_txq);
3859af75078fSIntel
3860af75078fSIntel     init_config();
3861fb73e096SJeff Guo
3862fb73e096SJeff Guo     if (hot_plug) {
38632049c511SJeff Guo         ret = rte_dev_hotplug_handle_enable();
3864fb73e096SJeff Guo         if (ret) {
38652049c511SJeff Guo             RTE_LOG(ERR, EAL,
38662049c511SJeff Guo                     "failed to enable hotplug handling\n");
3867fb73e096SJeff Guo             return -1;
3868fb73e096SJeff Guo         }
3869fb73e096SJeff Guo
38702049c511SJeff Guo         ret = rte_dev_event_monitor_start();
38712049c511SJeff Guo         if (ret) {
38722049c511SJeff Guo             RTE_LOG(ERR, EAL,
38732049c511SJeff Guo                     "failed to start device event monitoring\n");
38742049c511SJeff Guo             return -1;
38752049c511SJeff Guo         }
38762049c511SJeff Guo
38772049c511SJeff Guo         ret = rte_dev_event_callback_register(NULL,
3878cc1bf307SJeff Guo                                               dev_event_callback, NULL);
38792049c511SJeff Guo         if (ret) {
38802049c511SJeff Guo             RTE_LOG(ERR, EAL,
38812049c511SJeff Guo                     "failed to register device event callback\n");
38822049c511SJeff Guo             return -1;
38832049c511SJeff Guo         }
3884fb73e096SJeff Guo     }
3885fb73e096SJeff Guo
38866937d210SStephen Hemminger     if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3887148f963fSBruce Richardson         rte_exit(EXIT_FAILURE, "Start ports failed\n");
3888af75078fSIntel
3889ce8d5614SIntel     /* set all ports to promiscuous mode by default */
389034fc1051SIvan Ilchenko     RTE_ETH_FOREACH_DEV(port_id) {
389134fc1051SIvan Ilchenko         ret = rte_eth_promiscuous_enable(port_id);
389234fc1051SIvan Ilchenko         if (ret != 0)
389334fc1051SIvan Ilchenko             printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
389434fc1051SIvan Ilchenko                    port_id, rte_strerror(-ret));
389534fc1051SIvan Ilchenko     }
3896af75078fSIntel
38977e4441c8SRemy Horton     /* Init metrics library */
38987e4441c8SRemy Horton     rte_metrics_init(rte_socket_id());
38997e4441c8SRemy Horton
3900a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
390162d3216dSReshma Pattan     if (latencystats_enabled != 0) {
390262d3216dSReshma Pattan         int ret = rte_latencystats_init(1, NULL);
390362d3216dSReshma Pattan         if (ret)
390462d3216dSReshma Pattan             printf("Warning: latencystats init()"
390562d3216dSReshma Pattan                    " returned error %d\n", ret);
390662d3216dSReshma Pattan         printf("Latencystats running on lcore %d\n",
390762d3216dSReshma Pattan                latencystats_lcore_id);
390862d3216dSReshma Pattan     }
390962d3216dSReshma Pattan #endif
391062d3216dSReshma Pattan
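/*
 * Editor's note: both collectors are disabled by default (see the flags
 * reset earlier in main()); assuming the usual testpmd options, they are
 * enabled from the command line, e.g.:
 *
 *   ./dpdk-testpmd -l 0-3 -n 4 -- -i --latencystats=3 --bitrate-stats=3
 *
 * where the argument selects the lcore that polls the measurements. Results
 * are published through the metrics library initialized above with
 * rte_metrics_init().
 */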
39117e4441c8SRemy Horton     /* Setup bitrate stats */
3912a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
3913e25e6c70SRemy Horton     if (bitrate_enabled != 0) {
39147e4441c8SRemy Horton         bitrate_data = rte_stats_bitrate_create();
39157e4441c8SRemy Horton         if (bitrate_data == NULL)
3916e25e6c70SRemy Horton             rte_exit(EXIT_FAILURE,
3917e25e6c70SRemy Horton                      "Could not allocate bitrate data.\n");
39187e4441c8SRemy Horton         rte_stats_bitrate_reg(bitrate_data);
3919e25e6c70SRemy Horton     }
39207e4441c8SRemy Horton #endif
39217e4441c8SRemy Horton
3922a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
392381ef862bSAllain Legacy     if (strlen(cmdline_filename) != 0)
392481ef862bSAllain Legacy         cmdline_read_from_file(cmdline_filename);
392581ef862bSAllain Legacy
3926ca7feb22SCyril Chemparathy     if (interactive == 1) {
3927ca7feb22SCyril Chemparathy         if (auto_start) {
3928ca7feb22SCyril Chemparathy             printf("Start automatic packet forwarding\n");
3929ca7feb22SCyril Chemparathy             start_packet_forwarding(0);
3930ca7feb22SCyril Chemparathy         }
3931af75078fSIntel         prompt();
39320de738cfSJiayu Hu         pmd_test_exit();
3933ca7feb22SCyril Chemparathy     } else
39340d56cb81SThomas Monjalon #endif
39350d56cb81SThomas Monjalon     {
3936af75078fSIntel         char c;
3937af75078fSIntel         int rc;
3938af75078fSIntel
3939d9a191a0SPhil Yang         f_quit = 0;
3940d9a191a0SPhil Yang
3941af75078fSIntel         printf("No interactive command line requested, starting packet forwarding\n");
394299cabef0SPablo de Lara         start_packet_forwarding(tx_first);
3943cfea1f30SPablo de Lara         if (stats_period != 0) {
3944cfea1f30SPablo de Lara             uint64_t prev_time = 0, cur_time, diff_time = 0;
3945cfea1f30SPablo de Lara             uint64_t timer_period;
3946cfea1f30SPablo de Lara
3947cfea1f30SPablo de Lara             /* Convert to number of cycles */
3948cfea1f30SPablo de Lara             timer_period = stats_period * rte_get_timer_hz();
3949cfea1f30SPablo de Lara
3950d9a191a0SPhil Yang             while (f_quit == 0) {
3951cfea1f30SPablo de Lara                 cur_time = rte_get_timer_cycles();
3952cfea1f30SPablo de Lara                 diff_time += cur_time - prev_time;
3953cfea1f30SPablo de Lara
3954cfea1f30SPablo de Lara                 if (diff_time >= timer_period) {
3955cfea1f30SPablo de Lara                     print_stats();
3956cfea1f30SPablo de Lara                     /* Reset the timer */
3957cfea1f30SPablo de Lara                     diff_time = 0;
3958cfea1f30SPablo de Lara                 }
3959cfea1f30SPablo de Lara                 /* Sleep to avoid unnecessary checks */
3960cfea1f30SPablo de Lara                 prev_time = cur_time;
3961cfea1f30SPablo de Lara                 sleep(1);
3962cfea1f30SPablo de Lara             }
3963cfea1f30SPablo de Lara         }
3964cfea1f30SPablo de Lara
3965af75078fSIntel         printf("Press enter to exit\n");
3966af75078fSIntel         rc = read(0, &c, 1);
3967d3a274ceSZhihong Wang         pmd_test_exit();
3968af75078fSIntel         if (rc < 0)
3969af75078fSIntel             return 1;
3970af75078fSIntel     }
3971af75078fSIntel
39725e516c89SStephen Hemminger     ret = rte_eal_cleanup();
39735e516c89SStephen Hemminger     if (ret != 0)
39745e516c89SStephen Hemminger         rte_exit(EXIT_FAILURE,
39755e516c89SStephen Hemminger                  "EAL cleanup failed: %s\n", strerror(-ret));
39765e516c89SStephen Hemminger
39775e516c89SStephen Hemminger     return EXIT_SUCCESS;
3978af75078fSIntel }
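/*
 * Editor's note: the non-interactive branch above loops until f_quit is set
 * by signal_handler(). Assuming the usual testpmd options, a typical
 * non-interactive run exercising that loop is:
 *
 *   ./dpdk-testpmd -l 0-1 -n 4 -- --stats-period 2
 *
 * which starts forwarding immediately and redraws the port statistics every
 * 2 seconds until SIGINT/SIGTERM is received.
 */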