/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
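/*
 * Worked example (illustrative, assuming the common 64-byte cache line):
 * EXTBUF_ZONE_SIZE = 2 MiB - 4 * 64 = 2097152 - 256 = 2096896 bytes,
 * i.e. slightly under one 2M hugepage, so the zone plus the allocator's
 * bookkeeping still fits in a single IOVA-contiguous hugepage.
 */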
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
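/*
 * Illustrative note: the per-port arrays above are normally filled from the
 * testpmd command line, e.g. "--port-numa-config=(0,0),(1,1)" and
 * "--ring-numa-config=(port,flag,socket)"; when left unset, allocations
 * fall back to the NUMA policy described above.
 */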
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};
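/*
 * Note: the list above is NULL-terminated on purpose; the forwarding-mode
 * lookup (e.g. for the "set fwd <mode>" CLI command) walks it until the
 * sentinel, so new engines only need to be added before the final NULL.
 */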
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, a process running with the 'stats-period' option cannot be
 * terminated from the terminal. Set a flag to exit the stats-period loop
 * after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
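/*
 * Illustrative example: "set txpkts 40,64" (or the "--txpkts=40,64" option)
 * would set tx_pkt_nb_segs = 2, tx_pkt_seg_lengths = {40, 64} and a total
 * tx_pkt_length of 104 bytes for the "txonly" engine.
 */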
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;
/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;
/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_eth_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 to disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif
/*
 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}
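/*
 * The *_mp() wrappers below follow the same pattern as
 * eth_dev_configure_mp() above: device control-path operations are only
 * issued from the primary process, while secondary processes treat them
 * as successful no-ops and merely mirror the resulting state.
 */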
static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t slave_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_slaves;
	portid_t slave_pid;
	int i;

	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
					     RTE_MAX_ETHPORTS);
	if (num_slaves < 0) {
		fprintf(stderr, "Failed to get slave list for port = %u\n",
			bond_pid);
		return num_slaves;
	}

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		port = &ports[slave_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonded port also starts all the slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, false);
	}

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonded port also stops all the slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, true);
	}

	return 0;
}
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket has not yet been discovered.
 * If it is new, return a positive value. If already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
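/*
 * Typical usage, as in set_default_fwd_lcores_config() below (sketch):
 *
 *	if (new_socket_id(sock_num)) {
 *		if (num_sockets >= RTE_MAX_NUMA_NODES)
 *			rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n",
 *				 RTE_MAX_NUMA_NODES);
 *		socket_ids[num_sockets++] = sock_num;
 *	}
 */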
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
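/*
 * Worked example for calc_mem_size() (illustrative numbers): with 2M pages
 * and an obj_sz of 2560 bytes, mbuf_per_pg = 2097152 / 2560 = 819, so
 * 100000 mbufs need 123 pages (~246 MiB); adding the fixed 128 MiB header
 * estimate gives roughly 374 MiB in total, rounded up to the page size.
 */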
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
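/*
 * Example: for a 2M page, rte_log2_u64(2097152) = 21, so pagesz_flags()
 * returns 21 << HUGE_SHIFT, which matches the kernel's MAP_HUGE_2MB
 * encoding on Linux.
 */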
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
#endif
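/*
 * dma_map_cb()/dma_unmap_cb() above are mempool memory-chunk iterators:
 * mbuf_pool_create() below registers the map callback with
 * rte_mempool_mem_iter() for anonymous mempools, so every memory chunk is
 * registered with EAL and DMA-mapped for each probed device (the unmap
 * callback is presumably used symmetrically when such a pool is torn down).
 */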
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
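/*
 * Sizing sketch for setup_extbuf() (illustrative, assuming 64-byte cache
 * lines and a 2176-byte mbuf_sz): elt_size stays 2176 (already cache-line
 * aligned), elt_num = 2096896 / 2176 = 963 buffers per zone, so 100000
 * mbufs require zone_num = ceil(100000 / 963) = 104 pinned memzones.
 */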
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				"Get mbuf pool for socket %u failed: %s\n",
				socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
#endif
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
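/*
 * Note on multi-process behaviour: only the primary process actually
 * creates the pool above; secondary processes look the pool up by the
 * name built from the socket id and size index and exit on failure, so
 * the primary must be started (and create the pools) first.
 */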
Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1230c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1231c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1232c7f5dba7SAnatoly Burakov 12330e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 12340e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1235ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1236c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1237c7f5dba7SAnatoly Burakov heap_socket); 1238c7f5dba7SAnatoly Burakov break; 1239c7f5dba7SAnatoly Burakov } 1240761f7ae1SJie Zhou #endif 124172512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 124272512e18SViacheslav Ovsiienko { 124372512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 124472512e18SViacheslav Ovsiienko unsigned int ext_num; 124572512e18SViacheslav Ovsiienko 124672512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 124772512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 124872512e18SViacheslav Ovsiienko if (ext_num == 0) 124972512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 125072512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 125172512e18SViacheslav Ovsiienko 125272512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 125372512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 125472512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 125572512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 125672512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 125772512e18SViacheslav Ovsiienko ext_mem, ext_num); 125872512e18SViacheslav Ovsiienko free(ext_mem); 125972512e18SViacheslav Ovsiienko break; 126072512e18SViacheslav Ovsiienko } 1261c7f5dba7SAnatoly Burakov default: 1262c7f5dba7SAnatoly Burakov { 1263c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1264c7f5dba7SAnatoly Burakov } 1265bece7b6cSChristian Ehrhardt } 1266148f963fSBruce Richardson 1267761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 126824427bb9SOlivier Matz err: 1269761f7ae1SJie Zhou #endif 1270af75078fSIntel if (rte_mp == NULL) { 1271d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1272d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1273d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1274148f963fSBruce Richardson } else if (verbose_level > 0) { 1275591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1276af75078fSIntel } 1277401b744dSShahaf Shuler return rte_mp; 1278af75078fSIntel } 1279af75078fSIntel 128020a0286fSLiu Xiaofeng /* 128120a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 128220a0286fSLiu Xiaofeng * if valid, return 0, else return -1 128320a0286fSLiu Xiaofeng */ 128420a0286fSLiu Xiaofeng static int 128520a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 128620a0286fSLiu Xiaofeng { 128720a0286fSLiu Xiaofeng static int warning_once = 0; 128820a0286fSLiu Xiaofeng 1289c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 129020a0286fSLiu Xiaofeng if (!warning_once && numa_support) 129161a3b0e5SAndrew Rybchenko fprintf(stderr, 129261a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 129320a0286fSLiu Xiaofeng warning_once = 1; 129420a0286fSLiu Xiaofeng return -1; 129520a0286fSLiu Xiaofeng } 129620a0286fSLiu Xiaofeng return 0; 129720a0286fSLiu 
Xiaofeng } 129820a0286fSLiu Xiaofeng 12993f7311baSWei Dai /* 13003f7311baSWei Dai * Get the allowed maximum number of RX queues. 13013f7311baSWei Dai * *pid return the port id which has minimal value of 13023f7311baSWei Dai * max_rx_queues in all ports. 13033f7311baSWei Dai */ 13043f7311baSWei Dai queueid_t 13053f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 13063f7311baSWei Dai { 13079e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; 13086f51deb9SIvan Ilchenko bool max_rxq_valid = false; 13093f7311baSWei Dai portid_t pi; 13103f7311baSWei Dai struct rte_eth_dev_info dev_info; 13113f7311baSWei Dai 13123f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13136f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13146f51deb9SIvan Ilchenko continue; 13156f51deb9SIvan Ilchenko 13166f51deb9SIvan Ilchenko max_rxq_valid = true; 13173f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 13183f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 13193f7311baSWei Dai *pid = pi; 13203f7311baSWei Dai } 13213f7311baSWei Dai } 13226f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0; 13233f7311baSWei Dai } 13243f7311baSWei Dai 13253f7311baSWei Dai /* 13263f7311baSWei Dai * Check input rxq is valid or not. 13273f7311baSWei Dai * If input rxq is not greater than any of maximum number 13283f7311baSWei Dai * of RX queues of all ports, it is valid. 13293f7311baSWei Dai * if valid, return 0, else return -1 13303f7311baSWei Dai */ 13313f7311baSWei Dai int 13323f7311baSWei Dai check_nb_rxq(queueid_t rxq) 13333f7311baSWei Dai { 13343f7311baSWei Dai queueid_t allowed_max_rxq; 13353f7311baSWei Dai portid_t pid = 0; 13363f7311baSWei Dai 13373f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 13383f7311baSWei Dai if (rxq > allowed_max_rxq) { 133961a3b0e5SAndrew Rybchenko fprintf(stderr, 134061a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n", 134161a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid); 13423f7311baSWei Dai return -1; 13433f7311baSWei Dai } 13443f7311baSWei Dai return 0; 13453f7311baSWei Dai } 13463f7311baSWei Dai 134736db4f6cSWei Dai /* 134836db4f6cSWei Dai * Get the allowed maximum number of TX queues. 134936db4f6cSWei Dai * *pid return the port id which has minimal value of 135036db4f6cSWei Dai * max_tx_queues in all ports. 135136db4f6cSWei Dai */ 135236db4f6cSWei Dai queueid_t 135336db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 135436db4f6cSWei Dai { 13559e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; 13566f51deb9SIvan Ilchenko bool max_txq_valid = false; 135736db4f6cSWei Dai portid_t pi; 135836db4f6cSWei Dai struct rte_eth_dev_info dev_info; 135936db4f6cSWei Dai 136036db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13616f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13626f51deb9SIvan Ilchenko continue; 13636f51deb9SIvan Ilchenko 13646f51deb9SIvan Ilchenko max_txq_valid = true; 136536db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 136636db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 136736db4f6cSWei Dai *pid = pi; 136836db4f6cSWei Dai } 136936db4f6cSWei Dai } 13706f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0; 137136db4f6cSWei Dai } 137236db4f6cSWei Dai 137336db4f6cSWei Dai /* 137436db4f6cSWei Dai * Check input txq is valid or not. 137536db4f6cSWei Dai * If input txq is not greater than any of maximum number 137636db4f6cSWei Dai * of TX queues of all ports, it is valid. 
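 * Example (hypothetical limits): with two ports reporting max_tx_queues
 * of 16 and of 4, get_allowed_max_nb_txq() yields 4, so check_nb_txq(8)
 * fails while check_nb_txq(4) succeeds.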
137736db4f6cSWei Dai * if valid, return 0, else return -1
137836db4f6cSWei Dai */
137936db4f6cSWei Dai int
138036db4f6cSWei Dai check_nb_txq(queueid_t txq)
138136db4f6cSWei Dai {
138236db4f6cSWei Dai queueid_t allowed_max_txq;
138336db4f6cSWei Dai portid_t pid = 0;
138436db4f6cSWei Dai 
138536db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid);
138636db4f6cSWei Dai if (txq > allowed_max_txq) {
138761a3b0e5SAndrew Rybchenko fprintf(stderr,
138861a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
138961a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid);
139036db4f6cSWei Dai return -1;
139136db4f6cSWei Dai }
139236db4f6cSWei Dai return 0;
139336db4f6cSWei Dai }
139436db4f6cSWei Dai 
13951c69df45SOri Kam /*
139699e040d3SLijun Ou * Get the allowed maximum number of RXDs of every rx queue.
139799e040d3SLijun Ou * *pid return the port id which has minimal value of
139899e040d3SLijun Ou * max_rxd in all queues of all ports.
139999e040d3SLijun Ou */
140099e040d3SLijun Ou static uint16_t
140199e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
140299e040d3SLijun Ou {
140399e040d3SLijun Ou uint16_t allowed_max_rxd = UINT16_MAX;
140499e040d3SLijun Ou portid_t pi;
140599e040d3SLijun Ou struct rte_eth_dev_info dev_info;
140699e040d3SLijun Ou 
140799e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
140899e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
140999e040d3SLijun Ou continue;
141099e040d3SLijun Ou 
141199e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
141299e040d3SLijun Ou allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
141399e040d3SLijun Ou *pid = pi;
141499e040d3SLijun Ou }
141599e040d3SLijun Ou }
141699e040d3SLijun Ou return allowed_max_rxd;
141799e040d3SLijun Ou }
141899e040d3SLijun Ou 
141999e040d3SLijun Ou /*
142099e040d3SLijun Ou * Get the allowed minimal number of RXDs of every rx queue.
142199e040d3SLijun Ou * *pid return the port id which has the maximal value of
142299e040d3SLijun Ou * min_rxd in all queues of all ports.
142399e040d3SLijun Ou */
142499e040d3SLijun Ou static uint16_t
142599e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
142699e040d3SLijun Ou {
142799e040d3SLijun Ou uint16_t allowed_min_rxd = 0;
142899e040d3SLijun Ou portid_t pi;
142999e040d3SLijun Ou struct rte_eth_dev_info dev_info;
143099e040d3SLijun Ou 
143199e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
143299e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143399e040d3SLijun Ou continue;
143499e040d3SLijun Ou 
143599e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
143699e040d3SLijun Ou allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
143799e040d3SLijun Ou *pid = pi;
143899e040d3SLijun Ou }
143999e040d3SLijun Ou }
144099e040d3SLijun Ou 
144199e040d3SLijun Ou return allowed_min_rxd;
144299e040d3SLijun Ou }
144399e040d3SLijun Ou 
144499e040d3SLijun Ou /*
144599e040d3SLijun Ou * Check input rxd is valid or not.
144699e040d3SLijun Ou * If input rxd is not greater than the maximum number of RXDs
144799e040d3SLijun Ou * of any Rx queue and is not less than the minimal number of
144899e040d3SLijun Ou * RXDs of any Rx queue, it is valid.
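 * Example (hypothetical limits): if the most restrictive port reports
 * rx_desc_lim.nb_max = 4096 and another port reports nb_min = 64, only
 * rxd values in the range [64, 4096] are accepted.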
144999e040d3SLijun Ou * if valid, return 0, else return -1
145099e040d3SLijun Ou */
145199e040d3SLijun Ou int
145299e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
145399e040d3SLijun Ou {
145499e040d3SLijun Ou uint16_t allowed_max_rxd;
145599e040d3SLijun Ou uint16_t allowed_min_rxd;
145699e040d3SLijun Ou portid_t pid = 0;
145799e040d3SLijun Ou 
145899e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
145999e040d3SLijun Ou if (rxd > allowed_max_rxd) {
146061a3b0e5SAndrew Rybchenko fprintf(stderr,
146161a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
146261a3b0e5SAndrew Rybchenko rxd, allowed_max_rxd, pid);
146399e040d3SLijun Ou return -1;
146499e040d3SLijun Ou }
146599e040d3SLijun Ou 
146699e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
146799e040d3SLijun Ou if (rxd < allowed_min_rxd) {
146861a3b0e5SAndrew Rybchenko fprintf(stderr,
146961a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
147061a3b0e5SAndrew Rybchenko rxd, allowed_min_rxd, pid);
147199e040d3SLijun Ou return -1;
147299e040d3SLijun Ou }
147399e040d3SLijun Ou 
147499e040d3SLijun Ou return 0;
147599e040d3SLijun Ou }
147699e040d3SLijun Ou 
147799e040d3SLijun Ou /*
147899e040d3SLijun Ou * Get the allowed maximum number of TXDs of every tx queue.
147999e040d3SLijun Ou * *pid return the port id which has minimal value of
148099e040d3SLijun Ou * max_txd in all queues of all ports.
148199e040d3SLijun Ou */
148299e040d3SLijun Ou static uint16_t
148399e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
148499e040d3SLijun Ou {
148599e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX;
148699e040d3SLijun Ou portid_t pi;
148799e040d3SLijun Ou struct rte_eth_dev_info dev_info;
148899e040d3SLijun Ou 
148999e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
149099e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149199e040d3SLijun Ou continue;
149299e040d3SLijun Ou 
149399e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
149499e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max;
149599e040d3SLijun Ou *pid = pi;
149699e040d3SLijun Ou }
149799e040d3SLijun Ou }
149899e040d3SLijun Ou return allowed_max_txd;
149999e040d3SLijun Ou }
150099e040d3SLijun Ou 
150199e040d3SLijun Ou /*
150299e040d3SLijun Ou * Get the allowed minimal number of TXDs of every tx queue.
150399e040d3SLijun Ou * *pid return the port id which has the maximal value of
150499e040d3SLijun Ou * min_txd in all queues of all ports.
150599e040d3SLijun Ou */
150699e040d3SLijun Ou static uint16_t
150799e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
150899e040d3SLijun Ou {
150999e040d3SLijun Ou uint16_t allowed_min_txd = 0;
151099e040d3SLijun Ou portid_t pi;
151199e040d3SLijun Ou struct rte_eth_dev_info dev_info;
151299e040d3SLijun Ou 
151399e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
151499e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
151599e040d3SLijun Ou continue;
151699e040d3SLijun Ou 
151799e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
151899e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min;
151999e040d3SLijun Ou *pid = pi;
152099e040d3SLijun Ou }
152199e040d3SLijun Ou }
152299e040d3SLijun Ou 
152399e040d3SLijun Ou return allowed_min_txd;
152499e040d3SLijun Ou }
152599e040d3SLijun Ou 
152699e040d3SLijun Ou /*
152799e040d3SLijun Ou * Check input txd is valid or not.
152899e040d3SLijun Ou * If input txd is not greater than the maximum number of TXDs of any
152999e040d3SLijun Ou * Tx queue and is not less than the minimal number of TXDs of any Tx queue, it is valid.
153099e040d3SLijun Ou * if valid, return 0, else return -1
153199e040d3SLijun Ou */
153299e040d3SLijun Ou int
153399e040d3SLijun Ou check_nb_txd(queueid_t txd)
153499e040d3SLijun Ou {
153599e040d3SLijun Ou uint16_t allowed_max_txd;
153699e040d3SLijun Ou uint16_t allowed_min_txd;
153799e040d3SLijun Ou portid_t pid = 0;
153899e040d3SLijun Ou 
153999e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid);
154099e040d3SLijun Ou if (txd > allowed_max_txd) {
154161a3b0e5SAndrew Rybchenko fprintf(stderr,
154261a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
154361a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid);
154499e040d3SLijun Ou return -1;
154599e040d3SLijun Ou }
154699e040d3SLijun Ou 
154799e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid);
154899e040d3SLijun Ou if (txd < allowed_min_txd) {
154961a3b0e5SAndrew Rybchenko fprintf(stderr,
155061a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
155161a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid);
155299e040d3SLijun Ou return -1;
155399e040d3SLijun Ou }
155499e040d3SLijun Ou return 0;
155599e040d3SLijun Ou }
155699e040d3SLijun Ou 
155799e040d3SLijun Ou 
155899e040d3SLijun Ou /*
15591c69df45SOri Kam * Get the allowed maximum number of hairpin queues.
15601c69df45SOri Kam * *pid return the port id which has minimal value of
15611c69df45SOri Kam * max_hairpin_queues in all ports.
15621c69df45SOri Kam */
15631c69df45SOri Kam queueid_t
15641c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15651c69df45SOri Kam {
15669e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15671c69df45SOri Kam portid_t pi;
15681c69df45SOri Kam struct rte_eth_hairpin_cap cap;
15691c69df45SOri Kam 
15701c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) {
15711c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15721c69df45SOri Kam *pid = pi;
15731c69df45SOri Kam return 0;
15741c69df45SOri Kam }
15751c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) {
15761c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues;
15771c69df45SOri Kam *pid = pi;
15781c69df45SOri Kam }
15791c69df45SOri Kam }
15801c69df45SOri Kam return allowed_max_hairpinq;
15811c69df45SOri Kam }
15821c69df45SOri Kam 
15831c69df45SOri Kam /*
15841c69df45SOri Kam * Check input hairpin is valid or not.
15851c69df45SOri Kam * If input hairpin is not greater than any of maximum number
15861c69df45SOri Kam * of hairpin queues of all ports, it is valid.
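 * Example (hypothetical capabilities): if one port reports
 * cap.max_nb_queues = 2 and another reports 8,
 * get_allowed_max_nb_hairpinq() yields 2; any port without hairpin
 * support makes it return 0, so every nonzero hairpinq request is then
 * rejected.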
15871c69df45SOri Kam * if valid, return 0, else return -1 15881c69df45SOri Kam */ 15891c69df45SOri Kam int 15901c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15911c69df45SOri Kam { 15921c69df45SOri Kam queueid_t allowed_max_hairpinq; 15931c69df45SOri Kam portid_t pid = 0; 15941c69df45SOri Kam 15951c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15961c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 159761a3b0e5SAndrew Rybchenko fprintf(stderr, 159861a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 15991c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 16001c69df45SOri Kam return -1; 16011c69df45SOri Kam } 16021c69df45SOri Kam return 0; 16031c69df45SOri Kam } 16041c69df45SOri Kam 16051bb4a528SFerruh Yigit static int 16061bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 16071bb4a528SFerruh Yigit { 16081bb4a528SFerruh Yigit uint32_t eth_overhead; 16091bb4a528SFerruh Yigit 16101bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 16111bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 16121bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 16131bb4a528SFerruh Yigit else 16141bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 16151bb4a528SFerruh Yigit 16161bb4a528SFerruh Yigit return eth_overhead; 16171bb4a528SFerruh Yigit } 16181bb4a528SFerruh Yigit 1619af75078fSIntel static void 1620b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1621b6b8a1ebSViacheslav Ovsiienko { 1622b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1623b6b8a1ebSViacheslav Ovsiienko int ret; 1624b6b8a1ebSViacheslav Ovsiienko int i; 1625b6b8a1ebSViacheslav Ovsiienko 1626f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 1627f6d8a6d3SIvan Malov 1628b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1629b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1630b6b8a1ebSViacheslav Ovsiienko 1631b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1632b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1633b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1634b6b8a1ebSViacheslav Ovsiienko 1635295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1636b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1637295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1638b6b8a1ebSViacheslav Ovsiienko 1639b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1640b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 16413c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads; 1642b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1643b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 16443c4426dbSDmitry Kozlyuk port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 1645b6b8a1ebSViacheslav Ovsiienko 1646b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1647b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1648b6b8a1ebSViacheslav Ovsiienko 16491bb4a528SFerruh Yigit if (max_rx_pkt_len) 16501bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16511bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 16521bb4a528SFerruh Yigit 1653b6b8a1ebSViacheslav Ovsiienko /* set flag to 
initialize port/queue */ 1654b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1655b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1656b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1657b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1658b6b8a1ebSViacheslav Ovsiienko 1659b6b8a1ebSViacheslav Ovsiienko /* 1660b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1661b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1662b6b8a1ebSViacheslav Ovsiienko */ 1663b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1664b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16651bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16661bb4a528SFerruh Yigit uint16_t mtu; 1667b6b8a1ebSViacheslav Ovsiienko 16681bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16691bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16701bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16711bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16721bb4a528SFerruh Yigit 16731bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16741bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1675b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1676b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1677b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1678b6b8a1ebSViacheslav Ovsiienko } 1679b6b8a1ebSViacheslav Ovsiienko } 1680b6b8a1ebSViacheslav Ovsiienko } 16811bb4a528SFerruh Yigit } 1682b6b8a1ebSViacheslav Ovsiienko 1683b6b8a1ebSViacheslav Ovsiienko static void 1684af75078fSIntel init_config(void) 1685af75078fSIntel { 1686ce8d5614SIntel portid_t pid; 1687af75078fSIntel struct rte_mempool *mbp; 1688af75078fSIntel unsigned int nb_mbuf_per_pool; 1689af75078fSIntel lcoreid_t lc_id; 16906970401eSDavid Marchand #ifdef RTE_LIB_GRO 1691b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16926970401eSDavid Marchand #endif 16936970401eSDavid Marchand #ifdef RTE_LIB_GSO 169452f38a20SJiayu Hu uint32_t gso_types; 16956970401eSDavid Marchand #endif 1696487f9a59SYulong Pei 1697af75078fSIntel /* Configuration of logical cores. 
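 * One fwd_lcore context is allocated below for each forwarding lcore;
 * cpuid_idx records the index of the lcore within fwd_lcores_cpuids[].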
*/ 1698af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1699af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1700fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1701af75078fSIntel if (fwd_lcores == NULL) { 1702ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1703ce8d5614SIntel "failed\n", nb_lcores); 1704af75078fSIntel } 1705af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1706af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1707af75078fSIntel sizeof(struct fwd_lcore), 1708fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1709af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1710ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1711ce8d5614SIntel "failed\n"); 1712af75078fSIntel } 1713af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1714af75078fSIntel } 1715af75078fSIntel 17167d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1717b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 17186f51deb9SIvan Ilchenko 1719b6ea6408SIntel if (numa_support) { 1720b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1721b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1722b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 172320a0286fSLiu Xiaofeng 172429841336SPhil Yang /* 172529841336SPhil Yang * if socket_id is invalid, 172629841336SPhil Yang * set to the first available socket. 172729841336SPhil Yang */ 172820a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 172929841336SPhil Yang socket_id = socket_ids[0]; 1730b6ea6408SIntel } 1731b6b8a1ebSViacheslav Ovsiienko } else { 1732b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1733b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1734b6ea6408SIntel } 1735b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1736b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1737ce8d5614SIntel } 17383ab64341SOlivier Matz /* 17393ab64341SOlivier Matz * Create pools of mbuf. 17403ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 17413ab64341SOlivier Matz * socket 0 memory by default. 17423ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 17433ab64341SOlivier Matz * 17443ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 17453ab64341SOlivier Matz * nb_txd can be configured at run time. 
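 *
 * Worked example (assuming the usual testpmd constants of 2048 Rx and
 * 2048 Tx descriptors, a 512-packet burst, and, say, 4 lcores with a
 * 250-entry mempool cache): 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs,
 * which is then multiplied by RTE_MAX_ETHPORTS.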
17463ab64341SOlivier Matz */ 17473ab64341SOlivier Matz if (param_total_num_mbufs) 17483ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17493ab64341SOlivier Matz else { 17503ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 17513ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17523ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 17533ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17543ab64341SOlivier Matz } 17553ab64341SOlivier Matz 1756b6ea6408SIntel if (numa_support) { 175726cbb419SViacheslav Ovsiienko uint8_t i, j; 1758ce8d5614SIntel 1759c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 176026cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 176126cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 176226cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1763401b744dSShahaf Shuler nb_mbuf_per_pool, 176426cbb419SViacheslav Ovsiienko socket_ids[i], j); 17653ab64341SOlivier Matz } else { 176626cbb419SViacheslav Ovsiienko uint8_t i; 176726cbb419SViacheslav Ovsiienko 176826cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 176926cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 177026cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1771401b744dSShahaf Shuler nb_mbuf_per_pool, 177226cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 177326cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17743ab64341SOlivier Matz } 1775b6ea6408SIntel 1776b6ea6408SIntel init_port_config(); 17775886ae07SAdrien Mazarguil 17786970401eSDavid Marchand #ifdef RTE_LIB_GSO 1779295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1780295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17816970401eSDavid Marchand #endif 17825886ae07SAdrien Mazarguil /* 17835886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
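 * The lookup below is per NUMA socket: mbuf_pool_find() first tries the
 * pool on the lcore's own socket and, if none exists there, falls back
 * to the pool created for socket 0.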
17845886ae07SAdrien Mazarguil */ 17855886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17868fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 178726cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17888fd8bebcSAdrien Mazarguil 17895886ae07SAdrien Mazarguil if (mbp == NULL) 179026cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17915886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17926970401eSDavid Marchand #ifdef RTE_LIB_GSO 179352f38a20SJiayu Hu /* initialize GSO context */ 179452f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 179552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 179652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 179735b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 179835b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 179952f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 18006970401eSDavid Marchand #endif 18015886ae07SAdrien Mazarguil } 18025886ae07SAdrien Mazarguil 18030c0db76fSBernard Iremonger fwd_config_setup(); 1804b7091f1dSJiayu Hu 18056970401eSDavid Marchand #ifdef RTE_LIB_GRO 1806b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1807b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1808b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1809b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1810b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1811b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1812b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1813b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1814b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1815b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1816b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1817b7091f1dSJiayu Hu } 1818b7091f1dSJiayu Hu } 18196970401eSDavid Marchand #endif 1820ce8d5614SIntel } 1821ce8d5614SIntel 18222950a769SDeclan Doherty 18232950a769SDeclan Doherty void 1824a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 18252950a769SDeclan Doherty { 18262950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
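 * Typically invoked when a port is attached at run time: only the new
 * port's offload defaults are re-applied before refreshing the global
 * port configuration via init_port_config().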
*/ 1827b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 18282950a769SDeclan Doherty init_port_config(); 18292950a769SDeclan Doherty } 18302950a769SDeclan Doherty 1831ce8d5614SIntel int 1832ce8d5614SIntel init_fwd_streams(void) 1833ce8d5614SIntel { 1834ce8d5614SIntel portid_t pid; 1835ce8d5614SIntel struct rte_port *port; 1836ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 18375a8fb55cSReshma Pattan queueid_t q; 1838ce8d5614SIntel 1839ce8d5614SIntel /* set socket id according to numa or not */ 18407d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1841ce8d5614SIntel port = &ports[pid]; 1842ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 184361a3b0e5SAndrew Rybchenko fprintf(stderr, 184461a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 184561a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1846ce8d5614SIntel return -1; 1847ce8d5614SIntel } 1848ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 184961a3b0e5SAndrew Rybchenko fprintf(stderr, 185061a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 185161a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1852ce8d5614SIntel return -1; 1853ce8d5614SIntel } 185420a0286fSLiu Xiaofeng if (numa_support) { 185520a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 185620a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 185720a0286fSLiu Xiaofeng else { 1858b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 185920a0286fSLiu Xiaofeng 186029841336SPhil Yang /* 186129841336SPhil Yang * if socket_id is invalid, 186229841336SPhil Yang * set to the first available socket. 186329841336SPhil Yang */ 186420a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 186529841336SPhil Yang port->socket_id = socket_ids[0]; 186620a0286fSLiu Xiaofeng } 186720a0286fSLiu Xiaofeng } 1868b6ea6408SIntel else { 1869b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1870af75078fSIntel port->socket_id = 0; 1871b6ea6408SIntel else 1872b6ea6408SIntel port->socket_id = socket_num; 1873b6ea6408SIntel } 1874af75078fSIntel } 1875af75078fSIntel 18765a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18775a8fb55cSReshma Pattan if (q == 0) { 187861a3b0e5SAndrew Rybchenko fprintf(stderr, 187961a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18805a8fb55cSReshma Pattan return -1; 18815a8fb55cSReshma Pattan } 18825a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1883ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1884ce8d5614SIntel return 0; 1885ce8d5614SIntel /* clear the old */ 1886ce8d5614SIntel if (fwd_streams != NULL) { 1887ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1888ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1889ce8d5614SIntel continue; 1890ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1891ce8d5614SIntel fwd_streams[sm_id] = NULL; 1892af75078fSIntel } 1893ce8d5614SIntel rte_free(fwd_streams); 1894ce8d5614SIntel fwd_streams = NULL; 1895ce8d5614SIntel } 1896ce8d5614SIntel 1897ce8d5614SIntel /* init new */ 1898ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 18991f84c469SMatan Azrad if (nb_fwd_streams) { 1900ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 19011f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 19021f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1903ce8d5614SIntel if (fwd_streams == NULL) 19041f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 19051f84c469SMatan Azrad " 
(struct fwd_stream *)) failed\n", 19061f84c469SMatan Azrad nb_fwd_streams); 1907ce8d5614SIntel 1908af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 19091f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 19101f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 19111f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1912ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 19131f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 19141f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 19151f84c469SMatan Azrad } 1916af75078fSIntel } 1917ce8d5614SIntel 1918ce8d5614SIntel return 0; 1919af75078fSIntel } 1920af75078fSIntel 1921af75078fSIntel static void 1922af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1923af75078fSIntel { 19247569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 192585de481aSHonnappa Nagarahalli uint64_t nb_burst; 19267569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 19277569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1928af75078fSIntel uint16_t nb_pkt; 19297569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 19307569b8c1SHonnappa Nagarahalli int i; 1931af75078fSIntel 1932af75078fSIntel /* 1933af75078fSIntel * First compute the total number of packet bursts and the 1934af75078fSIntel * two highest numbers of bursts of the same number of packets. 1935af75078fSIntel */ 19367569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 19377569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 19387569b8c1SHonnappa Nagarahalli 19397569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 19407569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 19417569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 19427569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 19437569b8c1SHonnappa Nagarahalli 19447569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. 
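 * Example (made-up spread): out of 1000 bursts total, 20 empty polls,
 * 900 bursts of 32 packets and 80 bursts of 1 packet would print
 * roughly: "RX-bursts : 1000 [2% of 0 pkts + 90% of 32 pkts + 8% of
 * 1 pkts]".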
*/ 19456a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1946af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 19477569b8c1SHonnappa Nagarahalli 1948af75078fSIntel if (nb_burst == 0) 1949af75078fSIntel continue; 19507569b8c1SHonnappa Nagarahalli 1951af75078fSIntel total_burst += nb_burst; 19527569b8c1SHonnappa Nagarahalli 19537569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19547569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19557569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1956fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1957fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19587569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19597569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19607569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1961af75078fSIntel } 1962af75078fSIntel } 1963af75078fSIntel if (total_burst == 0) 1964af75078fSIntel return; 19657569b8c1SHonnappa Nagarahalli 19667569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19677569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19687569b8c1SHonnappa Nagarahalli if (i == 3) { 19697569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1970af75078fSIntel return; 1971af75078fSIntel } 19727569b8c1SHonnappa Nagarahalli 19737569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19747569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19757569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19767569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1977af75078fSIntel return; 1978af75078fSIntel } 19797569b8c1SHonnappa Nagarahalli 19807569b8c1SHonnappa Nagarahalli burst_percent[i] = 19817569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19827569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19837569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19847569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1985af75078fSIntel } 1986af75078fSIntel } 1987af75078fSIntel 1988af75078fSIntel static void 1989af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1990af75078fSIntel { 1991af75078fSIntel struct fwd_stream *fs; 1992af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1993af75078fSIntel 1994af75078fSIntel fs = fwd_streams[stream_id]; 1995af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1996af75078fSIntel (fs->fwd_dropped == 0)) 1997af75078fSIntel return; 1998af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1999af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 2000af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 2001af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 2002c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 2003c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 2004af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 2005af75078fSIntel 2006af75078fSIntel /* if checksum mode */ 2007af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 2008c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 2009c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 2010c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 201158d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 201258d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 2013d139cf23SLance Richardson printf(" RX- bad outer IP checksum: 
%-14"PRIu64"\n", 2014d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 201594d65546SDavid Marchand } else { 201694d65546SDavid Marchand printf("\n"); 2017af75078fSIntel } 2018af75078fSIntel 20190e4b1963SDharmik Thakkar if (record_burst_stats) { 2020af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 2021af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 20220e4b1963SDharmik Thakkar } 2023af75078fSIntel } 2024af75078fSIntel 202553324971SDavid Marchand void 202653324971SDavid Marchand fwd_stats_display(void) 202753324971SDavid Marchand { 202853324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 202953324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 203053324971SDavid Marchand struct { 203153324971SDavid Marchand struct fwd_stream *rx_stream; 203253324971SDavid Marchand struct fwd_stream *tx_stream; 203353324971SDavid Marchand uint64_t tx_dropped; 203453324971SDavid Marchand uint64_t rx_bad_ip_csum; 203553324971SDavid Marchand uint64_t rx_bad_l4_csum; 203653324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 2037d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 203853324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 203953324971SDavid Marchand uint64_t total_rx_dropped = 0; 204053324971SDavid Marchand uint64_t total_tx_dropped = 0; 204153324971SDavid Marchand uint64_t total_rx_nombuf = 0; 204253324971SDavid Marchand struct rte_eth_stats stats; 204353324971SDavid Marchand uint64_t fwd_cycles = 0; 204453324971SDavid Marchand uint64_t total_recv = 0; 204553324971SDavid Marchand uint64_t total_xmit = 0; 204653324971SDavid Marchand struct rte_port *port; 204753324971SDavid Marchand streamid_t sm_id; 204853324971SDavid Marchand portid_t pt_id; 2049baef6bbfSMin Hu (Connor) int ret; 205053324971SDavid Marchand int i; 205153324971SDavid Marchand 205253324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 205353324971SDavid Marchand 205453324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 205553324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 205653324971SDavid Marchand 205753324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 205853324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 205953324971SDavid Marchand fwd_stream_stats_display(sm_id); 206053324971SDavid Marchand } else { 206153324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 206253324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 206353324971SDavid Marchand } 206453324971SDavid Marchand 206553324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 206653324971SDavid Marchand 206753324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 206853324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 206953324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 207053324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2071d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2072d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 207353324971SDavid Marchand 2074bc700b67SDharmik Thakkar if (record_core_cycles) 207553324971SDavid Marchand fwd_cycles += fs->core_cycles; 207653324971SDavid Marchand } 207753324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 207853324971SDavid Marchand pt_id = fwd_ports_ids[i]; 207953324971SDavid Marchand port = &ports[pt_id]; 208053324971SDavid Marchand 2081baef6bbfSMin Hu 
(Connor) ret = rte_eth_stats_get(pt_id, &stats); 2082baef6bbfSMin Hu (Connor) if (ret != 0) { 2083baef6bbfSMin Hu (Connor) fprintf(stderr, 2084baef6bbfSMin Hu (Connor) "%s: Error: failed to get stats (port %u): %d", 2085baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 2086baef6bbfSMin Hu (Connor) continue; 2087baef6bbfSMin Hu (Connor) } 208853324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 208953324971SDavid Marchand stats.opackets -= port->stats.opackets; 209053324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 209153324971SDavid Marchand stats.obytes -= port->stats.obytes; 209253324971SDavid Marchand stats.imissed -= port->stats.imissed; 209353324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 209453324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 209553324971SDavid Marchand 209653324971SDavid Marchand total_recv += stats.ipackets; 209753324971SDavid Marchand total_xmit += stats.opackets; 209853324971SDavid Marchand total_rx_dropped += stats.imissed; 209953324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 210053324971SDavid Marchand total_tx_dropped += stats.oerrors; 210153324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 210253324971SDavid Marchand 210353324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 210453324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 210553324971SDavid Marchand 210608dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 210708dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 210853324971SDavid Marchand stats.ipackets + stats.imissed); 210953324971SDavid Marchand 2110d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 211153324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 211253324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 211353324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 211453324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 211553324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 211653324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2117d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2118d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2119d139cf23SLance Richardson } 212053324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 212108dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 212208dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 212353324971SDavid Marchand } 212453324971SDavid Marchand 212508dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 212653324971SDavid Marchand "TX-total: %-"PRIu64"\n", 212753324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 212853324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 212953324971SDavid Marchand 21300e4b1963SDharmik Thakkar if (record_burst_stats) { 213153324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 213253324971SDavid Marchand pkt_burst_stats_display("RX", 213353324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 213453324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 213553324971SDavid Marchand pkt_burst_stats_display("TX", 213653324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 21370e4b1963SDharmik Thakkar } 213853324971SDavid Marchand 213953324971SDavid Marchand printf(" %s--------------------------------%s\n", 214053324971SDavid Marchand fwd_stats_border, fwd_stats_border); 
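/*
 * Note: the per-port figures above are deltas against the snapshot
 * taken by fwd_stats_reset(), which is why port->stats is subtracted
 * from the freshly read rte_eth_stats_get() values.
 */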
214153324971SDavid Marchand } 214253324971SDavid Marchand 214353324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 214453324971SDavid Marchand "%s\n", 214553324971SDavid Marchand acc_stats_border, acc_stats_border); 214653324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 214753324971SDavid Marchand "%-"PRIu64"\n" 214853324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 214953324971SDavid Marchand "%-"PRIu64"\n", 215053324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 215153324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 215253324971SDavid Marchand if (total_rx_nombuf > 0) 215353324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 215453324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 215553324971SDavid Marchand "%s\n", 215653324971SDavid Marchand acc_stats_border, acc_stats_border); 2157bc700b67SDharmik Thakkar if (record_core_cycles) { 21584c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21593a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21603a164e00SPhil Yang uint64_t total_pkts = 0; 21613a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21623a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21633a164e00SPhil Yang total_pkts = total_xmit; 21643a164e00SPhil Yang else 21653a164e00SPhil Yang total_pkts = total_recv; 21663a164e00SPhil Yang 21671920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 21683a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21694c0497b1SDharmik Thakkar " MHz Clock\n", 21703a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21713a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21724c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21733a164e00SPhil Yang } 2174bc700b67SDharmik Thakkar } 217553324971SDavid Marchand } 217653324971SDavid Marchand 217753324971SDavid Marchand void 217853324971SDavid Marchand fwd_stats_reset(void) 217953324971SDavid Marchand { 218053324971SDavid Marchand streamid_t sm_id; 218153324971SDavid Marchand portid_t pt_id; 2182baef6bbfSMin Hu (Connor) int ret; 218353324971SDavid Marchand int i; 218453324971SDavid Marchand 218553324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 218653324971SDavid Marchand pt_id = fwd_ports_ids[i]; 2187baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats); 2188baef6bbfSMin Hu (Connor) if (ret != 0) 2189baef6bbfSMin Hu (Connor) fprintf(stderr, 2190baef6bbfSMin Hu (Connor) "%s: Error: failed to clear stats (port %u):%d", 2191baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 219253324971SDavid Marchand } 219353324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 219453324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 219553324971SDavid Marchand 219653324971SDavid Marchand fs->rx_packets = 0; 219753324971SDavid Marchand fs->tx_packets = 0; 219853324971SDavid Marchand fs->fwd_dropped = 0; 219953324971SDavid Marchand fs->rx_bad_ip_csum = 0; 220053324971SDavid Marchand fs->rx_bad_l4_csum = 0; 220153324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2202d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 220353324971SDavid Marchand 220453324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 220553324971SDavid Marchand 
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
220653324971SDavid Marchand fs->core_cycles = 0;
220753324971SDavid Marchand }
220853324971SDavid Marchand }
220953324971SDavid Marchand 
2210af75078fSIntel static void
22117741e4cfSIntel flush_fwd_rx_queues(void)
2212af75078fSIntel {
2213af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2214af75078fSIntel portid_t rxp;
22157741e4cfSIntel portid_t port_id;
2216af75078fSIntel queueid_t rxq;
2217af75078fSIntel uint16_t nb_rx;
2218af75078fSIntel uint16_t i;
2219af75078fSIntel uint8_t j;
2220f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2221594302c7SJames Poole uint64_t timer_period;
2222f487715fSReshma Pattan 
2223a550baf2SMin Hu (Connor) if (num_procs > 1) {
2224a550baf2SMin Hu (Connor) printf("multi-process does not support flushing fwd Rx queues, skipping.\n");
2225a550baf2SMin Hu (Connor) return;
2226a550baf2SMin Hu (Connor) }
2227a550baf2SMin Hu (Connor) 
2228f487715fSReshma Pattan /* convert to number of cycles */
2229594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */
2230af75078fSIntel 
2231af75078fSIntel for (j = 0; j < 2; j++) {
22327741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2233af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
22347741e4cfSIntel port_id = fwd_ports_ids[rxp];
22353c4426dbSDmitry Kozlyuk 
22363c4426dbSDmitry Kozlyuk /* Polling stopped queues is prohibited. */
22373c4426dbSDmitry Kozlyuk if (ports[port_id].rxq[rxq].state ==
22383c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED)
22393c4426dbSDmitry Kozlyuk continue;
22403c4426dbSDmitry Kozlyuk 
2241f487715fSReshma Pattan /**
2242f487715fSReshma Pattan * testpmd can get stuck in the do-while loop below
2243f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero
2244f487715fSReshma Pattan * packets, so a timer is used to exit this loop
2245f487715fSReshma Pattan * after a 1 second expiry.
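 * (timer_period, set above from rte_get_timer_hz(), holds one second
 * worth of TSC cycles.)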
2246f487715fSReshma Pattan */ 2247f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 2248af75078fSIntel do { 22497741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 2250013af9b6SIntel pkts_burst, MAX_PKT_BURST); 2251af75078fSIntel for (i = 0; i < nb_rx; i++) 2252af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 2253f487715fSReshma Pattan 2254f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 2255f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 2256f487715fSReshma Pattan timer_tsc += diff_tsc; 2257f487715fSReshma Pattan } while ((nb_rx > 0) && 2258f487715fSReshma Pattan (timer_tsc < timer_period)); 2259f487715fSReshma Pattan timer_tsc = 0; 2260af75078fSIntel } 2261af75078fSIntel } 2262af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 2263af75078fSIntel } 2264af75078fSIntel } 2265af75078fSIntel 2266af75078fSIntel static void 2267af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 2268af75078fSIntel { 2269af75078fSIntel struct fwd_stream **fsm; 2270af75078fSIntel streamid_t nb_fs; 2271af75078fSIntel streamid_t sm_id; 2272a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 22737e4441c8SRemy Horton uint64_t tics_per_1sec; 22747e4441c8SRemy Horton uint64_t tics_datum; 22757e4441c8SRemy Horton uint64_t tics_current; 22764918a357SXiaoyun Li uint16_t i, cnt_ports; 2277af75078fSIntel 22784918a357SXiaoyun Li cnt_ports = nb_ports; 22797e4441c8SRemy Horton tics_datum = rte_rdtsc(); 22807e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 22817e4441c8SRemy Horton #endif 2282af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 2283af75078fSIntel nb_fs = fc->stream_nb; 2284af75078fSIntel do { 2285af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 22863c4426dbSDmitry Kozlyuk if (!fsm[sm_id]->disabled) 2287af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 2288a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 2289e25e6c70SRemy Horton if (bitrate_enabled != 0 && 2290e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 22917e4441c8SRemy Horton tics_current = rte_rdtsc(); 22927e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 22937e4441c8SRemy Horton /* Periodic bitrate calculation */ 22944918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2295e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 22964918a357SXiaoyun Li ports_ids[i]); 22977e4441c8SRemy Horton tics_datum = tics_current; 22987e4441c8SRemy Horton } 2299e25e6c70SRemy Horton } 23007e4441c8SRemy Horton #endif 2301a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 230265eb1e54SPablo de Lara if (latencystats_enabled != 0 && 230365eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 230462d3216dSReshma Pattan rte_latencystats_update(); 230562d3216dSReshma Pattan #endif 230662d3216dSReshma Pattan 2307af75078fSIntel } while (! fc->stopped); 2308af75078fSIntel } 2309af75078fSIntel 2310af75078fSIntel static int 2311af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2312af75078fSIntel { 2313af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2314af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2315af75078fSIntel return 0; 2316af75078fSIntel } 2317af75078fSIntel 2318af75078fSIntel /* 2319af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2320af75078fSIntel * Used to start communication flows in network loopback test configurations. 
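 *
 * Usage note: this is what "start tx_first [n]" (or the --tx-first
 * option) ends up running; start_packet_forwarding() launches it once
 * per requested initial burst before starting the regular engine.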
2321af75078fSIntel */ 2322af75078fSIntel static int 2323af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2324af75078fSIntel { 2325af75078fSIntel struct fwd_lcore *fwd_lc; 2326af75078fSIntel struct fwd_lcore tmp_lcore; 2327af75078fSIntel 2328af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2329af75078fSIntel tmp_lcore = *fwd_lc; 2330af75078fSIntel tmp_lcore.stopped = 1; 2331af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2332af75078fSIntel return 0; 2333af75078fSIntel } 2334af75078fSIntel 2335af75078fSIntel /* 2336af75078fSIntel * Launch packet forwarding: 2337af75078fSIntel * - Setup per-port forwarding context. 2338af75078fSIntel * - launch logical cores with their forwarding configuration. 2339af75078fSIntel */ 2340af75078fSIntel static void 2341af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2342af75078fSIntel { 2343af75078fSIntel unsigned int i; 2344af75078fSIntel unsigned int lc_id; 2345af75078fSIntel int diag; 2346af75078fSIntel 2347af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2348af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2349af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2350af75078fSIntel fwd_lcores[i]->stopped = 0; 2351af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2352af75078fSIntel fwd_lcores[i], lc_id); 2353af75078fSIntel if (diag != 0) 235461a3b0e5SAndrew Rybchenko fprintf(stderr, 235561a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2356af75078fSIntel lc_id, diag); 2357af75078fSIntel } 2358af75078fSIntel } 2359af75078fSIntel } 2360af75078fSIntel 2361af75078fSIntel /* 2362af75078fSIntel * Launch packet forwarding configuration. 2363af75078fSIntel */ 2364af75078fSIntel void 2365af75078fSIntel start_packet_forwarding(int with_tx_first) 2366af75078fSIntel { 2367af75078fSIntel port_fwd_begin_t port_fwd_begin; 2368af75078fSIntel port_fwd_end_t port_fwd_end; 23693c4426dbSDmitry Kozlyuk stream_init_t stream_init = cur_fwd_eng->stream_init; 2370af75078fSIntel unsigned int i; 2371af75078fSIntel 23725a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 23735a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 23745a8fb55cSReshma Pattan 23755a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 23765a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 23775a8fb55cSReshma Pattan 23785a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 23795a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 23805a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 23815a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 23825a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 23835a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 23845a8fb55cSReshma Pattan 2385ce8d5614SIntel if (all_ports_started() == 0) { 238661a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2387ce8d5614SIntel return; 2388ce8d5614SIntel } 2389af75078fSIntel if (test_done == 0) { 239061a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2391af75078fSIntel return; 2392af75078fSIntel } 23937741e4cfSIntel 239447a767b2SMatan Azrad fwd_config_setup(); 239547a767b2SMatan Azrad 239665744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 239765744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 239865744833SXueming Li return; 239965744833SXueming 
Li 24003c4426dbSDmitry Kozlyuk if (stream_init != NULL) 24013c4426dbSDmitry Kozlyuk for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) 24023c4426dbSDmitry Kozlyuk stream_init(fwd_streams[i]); 24033c4426dbSDmitry Kozlyuk 2404a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2405a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2406a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2407a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2408a78040c9SAlvin Zhang fprintf(stderr, 2409a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2410a78040c9SAlvin Zhang return; 2411a78040c9SAlvin Zhang } 2412a78040c9SAlvin Zhang } 2413a78040c9SAlvin Zhang } 2414a78040c9SAlvin Zhang 2415a78040c9SAlvin Zhang if (with_tx_first) { 2416a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2417a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2418a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2419a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2420a78040c9SAlvin Zhang fprintf(stderr, 2421a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2422a78040c9SAlvin Zhang return; 2423a78040c9SAlvin Zhang } 2424a78040c9SAlvin Zhang } 2425a78040c9SAlvin Zhang } 2426a78040c9SAlvin Zhang } 2427a78040c9SAlvin Zhang 2428a78040c9SAlvin Zhang test_done = 0; 2429a78040c9SAlvin Zhang 24307741e4cfSIntel if(!no_flush_rx) 24317741e4cfSIntel flush_fwd_rx_queues(); 24327741e4cfSIntel 2433af75078fSIntel rxtx_config_display(); 2434af75078fSIntel 243553324971SDavid Marchand fwd_stats_reset(); 2436af75078fSIntel if (with_tx_first) { 2437acbf77a6SZhihong Wang while (with_tx_first--) { 2438acbf77a6SZhihong Wang launch_packet_forwarding( 2439acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2440af75078fSIntel rte_eal_mp_wait_lcore(); 2441acbf77a6SZhihong Wang } 2442af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2443af75078fSIntel if (port_fwd_end != NULL) { 2444af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2445af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2446af75078fSIntel } 2447af75078fSIntel } 2448af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2449af75078fSIntel } 2450af75078fSIntel 2451af75078fSIntel void 2452af75078fSIntel stop_packet_forwarding(void) 2453af75078fSIntel { 2454af75078fSIntel port_fwd_end_t port_fwd_end; 2455af75078fSIntel lcoreid_t lc_id; 245653324971SDavid Marchand portid_t pt_id; 245753324971SDavid Marchand int i; 2458af75078fSIntel 2459af75078fSIntel if (test_done) { 246061a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2461af75078fSIntel return; 2462af75078fSIntel } 2463af75078fSIntel printf("Telling cores to stop..."); 2464af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2465af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2466af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2467af75078fSIntel rte_eal_mp_wait_lcore(); 2468af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2469af75078fSIntel if (port_fwd_end != NULL) { 2470af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2471af75078fSIntel pt_id = fwd_ports_ids[i]; 2472af75078fSIntel (*port_fwd_end)(pt_id); 2473af75078fSIntel } 2474af75078fSIntel } 2475c185d42cSDavid Marchand 247653324971SDavid Marchand fwd_stats_display(); 247758d475b7SJerin Jacob 2478af75078fSIntel printf("\nDone.\n"); 2479af75078fSIntel test_done = 1; 2480af75078fSIntel } 2481af75078fSIntel 2482cfae07fdSOuyang 
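/*
 * Editor's note: a minimal sketch of the launch/stop protocol used by
 * start_packet_forwarding() and stop_packet_forwarding() above. All
 * "sketch_" names are hypothetical; testpmd's real per-lcore flag is
 * fwd_lcores[i]->stopped and its worker is start_pkt_forward_on_core.
 */
static volatile int sketch_stop; /* hypothetical stop flag */

static int
sketch_fwd_loop(void *arg __rte_unused)
{
	while (!sketch_stop)
		; /* a real worker forwards one burst per iteration */
	return 0;
}

static void
sketch_start(void)
{
	unsigned int lc_id;

	sketch_stop = 0;
	RTE_LCORE_FOREACH_WORKER(lc_id)
		if (rte_eal_remote_launch(sketch_fwd_loop, NULL, lc_id) != 0)
			fprintf(stderr, "launch lcore %u failed\n", lc_id);
}

static void
sketch_stop_all(void)
{
	sketch_stop = 1;	 /* signal every worker... */
	rte_eal_mp_wait_lcore(); /* ...and join them, as the code above does */
}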
Changchun void 2483cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 2484cfae07fdSOuyang Changchun { 2485492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 248661a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link up fail.\n"); 2487cfae07fdSOuyang Changchun } 2488cfae07fdSOuyang Changchun 2489cfae07fdSOuyang Changchun void 2490cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 2491cfae07fdSOuyang Changchun { 2492492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 249361a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link down fail.\n"); 2494cfae07fdSOuyang Changchun } 2495cfae07fdSOuyang Changchun 2496ce8d5614SIntel static int 2497ce8d5614SIntel all_ports_started(void) 2498ce8d5614SIntel { 2499ce8d5614SIntel portid_t pi; 2500ce8d5614SIntel struct rte_port *port; 2501ce8d5614SIntel 25027d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2503ce8d5614SIntel port = &ports[pi]; 2504ce8d5614SIntel /* Check if there is a port which is not started */ 250541b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 250641b05095SBernard Iremonger (port->slave_flag == 0)) 2507ce8d5614SIntel return 0; 2508ce8d5614SIntel } 2509ce8d5614SIntel 2510ce8d5614SIntel /* No port is not started */ 2511ce8d5614SIntel return 1; 2512ce8d5614SIntel } 2513ce8d5614SIntel 2514148f963fSBruce Richardson int 25156018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 25166018eb8cSShahaf Shuler { 25176018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 25186018eb8cSShahaf Shuler 25196018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 25206018eb8cSShahaf Shuler (port->slave_flag == 0)) 25216018eb8cSShahaf Shuler return 0; 25226018eb8cSShahaf Shuler return 1; 25236018eb8cSShahaf Shuler } 25246018eb8cSShahaf Shuler 25256018eb8cSShahaf Shuler int 2526edab33b1STetsuya Mukawa all_ports_stopped(void) 2527edab33b1STetsuya Mukawa { 2528edab33b1STetsuya Mukawa portid_t pi; 2529edab33b1STetsuya Mukawa 25307d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 25316018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 2532edab33b1STetsuya Mukawa return 0; 2533edab33b1STetsuya Mukawa } 2534edab33b1STetsuya Mukawa 2535edab33b1STetsuya Mukawa return 1; 2536edab33b1STetsuya Mukawa } 2537edab33b1STetsuya Mukawa 2538edab33b1STetsuya Mukawa int 2539edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2540edab33b1STetsuya Mukawa { 2541edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2542edab33b1STetsuya Mukawa return 0; 2543edab33b1STetsuya Mukawa 2544edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2545edab33b1STetsuya Mukawa return 0; 2546edab33b1STetsuya Mukawa 2547edab33b1STetsuya Mukawa return 1; 2548edab33b1STetsuya Mukawa } 2549edab33b1STetsuya Mukawa 25501c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. 
*/ 25511c69df45SOri Kam static int 255201817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 25531c69df45SOri Kam { 25541c69df45SOri Kam queueid_t qi; 25551c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 25561c69df45SOri Kam .peer_count = 1, 25571c69df45SOri Kam }; 25581c69df45SOri Kam int i; 25591c69df45SOri Kam int diag; 25601c69df45SOri Kam struct rte_port *port = &ports[pi]; 256101817b10SBing Zhao uint16_t peer_rx_port = pi; 256201817b10SBing Zhao uint16_t peer_tx_port = pi; 256301817b10SBing Zhao uint32_t manual = 1; 256401817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 256501817b10SBing Zhao 256601817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 256701817b10SBing Zhao peer_rx_port = pi; 256801817b10SBing Zhao peer_tx_port = pi; 256901817b10SBing Zhao manual = 0; 257001817b10SBing Zhao } else if (hairpin_mode & 0x1) { 257101817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 257201817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 257301817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 257401817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 257501817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 257601817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 257701817b10SBing Zhao peer_rx_port = p_pi; 257801817b10SBing Zhao } else { 257901817b10SBing Zhao uint16_t next_pi; 258001817b10SBing Zhao 258101817b10SBing Zhao /* Last port will be the peer RX port of the first. */ 258201817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi) 258301817b10SBing Zhao peer_rx_port = next_pi; 258401817b10SBing Zhao } 258501817b10SBing Zhao manual = 1; 258601817b10SBing Zhao } else if (hairpin_mode & 0x2) { 258701817b10SBing Zhao if (cnt_pi & 0x1) { 258801817b10SBing Zhao peer_rx_port = p_pi; 258901817b10SBing Zhao } else { 259001817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1, 259101817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 259201817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS) 259301817b10SBing Zhao peer_rx_port = pi; 259401817b10SBing Zhao } 259501817b10SBing Zhao peer_tx_port = peer_rx_port; 259601817b10SBing Zhao manual = 1; 259701817b10SBing Zhao } 25981c69df45SOri Kam 25991c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { 260001817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port; 26011c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq; 260201817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 260301817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 26041c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup 26051c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf); 26061c69df45SOri Kam i++; 26071c69df45SOri Kam if (diag == 0) 26081c69df45SOri Kam continue; 26091c69df45SOri Kam 26101c69df45SOri Kam /* Fail to setup rx queue, return */ 2611eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2612eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2613eac341d3SJoyce Kong else 261461a3b0e5SAndrew Rybchenko fprintf(stderr, 261561a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 261661a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 261761a3b0e5SAndrew Rybchenko pi); 26181c69df45SOri Kam /* try to reconfigure queues next time */ 26191c69df45SOri Kam port->need_reconfig_queues = 1; 26201c69df45SOri Kam return -1; 26211c69df45SOri Kam } 26221c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { 262301817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port; 26241c69df45SOri Kam 
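/*
 * Editor's note: the low nibble of hairpin_mode selects the topology
 * handled above - 0 loops each port to itself, bit 0 (0x1) chains the
 * ports into a ring, bit 1 (0x2) binds them in pairs - while bit 4
 * (0x10) requests explicit Tx flow mode; the manual_bind and
 * tx_explicit fields of the queue configuration are derived from
 * these bits.
 */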
hairpin_conf.peers[0].queue = i + nb_txq; 262501817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 262601817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 26271c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup 26281c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf); 26291c69df45SOri Kam i++; 26301c69df45SOri Kam if (diag == 0) 26311c69df45SOri Kam continue; 26321c69df45SOri Kam 26331c69df45SOri Kam /* Fail to setup rx queue, return */ 2634eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2635eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2636eac341d3SJoyce Kong else 263761a3b0e5SAndrew Rybchenko fprintf(stderr, 263861a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 263961a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 264061a3b0e5SAndrew Rybchenko pi); 26411c69df45SOri Kam /* try to reconfigure queues next time */ 26421c69df45SOri Kam port->need_reconfig_queues = 1; 26431c69df45SOri Kam return -1; 26441c69df45SOri Kam } 26451c69df45SOri Kam return 0; 26461c69df45SOri Kam } 26471c69df45SOri Kam 26482befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 26492befc67fSViacheslav Ovsiienko int 26502befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 26512befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 26522befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 26532befc67fSViacheslav Ovsiienko { 26542befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 26552befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 26562befc67fSViacheslav Ovsiienko int ret; 26572befc67fSViacheslav Ovsiienko 26582befc67fSViacheslav Ovsiienko if (rx_pkt_nb_segs <= 1 || 26592befc67fSViacheslav Ovsiienko (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { 26602befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26612befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26622befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, 26632befc67fSViacheslav Ovsiienko nb_rx_desc, socket_id, 26642befc67fSViacheslav Ovsiienko rx_conf, mp); 26653c4426dbSDmitry Kozlyuk goto exit; 26662befc67fSViacheslav Ovsiienko } 26672befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 26682befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 26692befc67fSViacheslav Ovsiienko struct rte_mempool *mpx; 26702befc67fSViacheslav Ovsiienko /* 26712befc67fSViacheslav Ovsiienko * Use last valid pool for the segments with number 26722befc67fSViacheslav Ovsiienko * exceeding the pool index. 26732befc67fSViacheslav Ovsiienko */ 2674*1108c33eSRaja Zidane mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 26752befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 26762befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 26772befc67fSViacheslav Ovsiienko rx_seg->length = rx_pkt_seg_lengths[i] ? 26782befc67fSViacheslav Ovsiienko rx_pkt_seg_lengths[i] : 26792befc67fSViacheslav Ovsiienko mbuf_data_size[mp_n]; 26802befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 26812befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 26822befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ? 
mpx : mp; 26832befc67fSViacheslav Ovsiienko } 26842befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 26852befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 26862befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 26872befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 26882befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26892befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26903c4426dbSDmitry Kozlyuk exit: 26913c4426dbSDmitry Kozlyuk ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ? 26923c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 26933c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 26942befc67fSViacheslav Ovsiienko return ret; 26952befc67fSViacheslav Ovsiienko } 26962befc67fSViacheslav Ovsiienko 269763b72657SIvan Ilchenko static int 269863b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 269963b72657SIvan Ilchenko { 270063b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 270163b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 270263b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 270363b72657SIvan Ilchenko 270463b72657SIvan Ilchenko if (xstats_display_num == 0) 270563b72657SIvan Ilchenko return 0; 270663b72657SIvan Ilchenko 270763b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 270863b72657SIvan Ilchenko if (*ids_supp == NULL) 270963b72657SIvan Ilchenko goto fail_ids_supp; 271063b72657SIvan Ilchenko 271163b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 271263b72657SIvan Ilchenko sizeof(**prev_values)); 271363b72657SIvan Ilchenko if (*prev_values == NULL) 271463b72657SIvan Ilchenko goto fail_prev_values; 271563b72657SIvan Ilchenko 271663b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 271763b72657SIvan Ilchenko sizeof(**curr_values)); 271863b72657SIvan Ilchenko if (*curr_values == NULL) 271963b72657SIvan Ilchenko goto fail_curr_values; 272063b72657SIvan Ilchenko 272163b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 272263b72657SIvan Ilchenko 272363b72657SIvan Ilchenko return 0; 272463b72657SIvan Ilchenko 272563b72657SIvan Ilchenko fail_curr_values: 272663b72657SIvan Ilchenko free(*prev_values); 272763b72657SIvan Ilchenko fail_prev_values: 272863b72657SIvan Ilchenko free(*ids_supp); 272963b72657SIvan Ilchenko fail_ids_supp: 273063b72657SIvan Ilchenko return -ENOMEM; 273163b72657SIvan Ilchenko } 273263b72657SIvan Ilchenko 273363b72657SIvan Ilchenko static void 273463b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 273563b72657SIvan Ilchenko { 273663b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 273763b72657SIvan Ilchenko return; 273863b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 273963b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 274063b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 274163b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 274263b72657SIvan Ilchenko } 274363b72657SIvan Ilchenko 274463b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
*/ 274563b72657SIvan Ilchenko static void 274663b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi) 274763b72657SIvan Ilchenko { 274863b72657SIvan Ilchenko unsigned int stat, stat_supp; 274963b72657SIvan Ilchenko const char *xstat_name; 275063b72657SIvan Ilchenko struct rte_port *port; 275163b72657SIvan Ilchenko uint64_t *ids_supp; 275263b72657SIvan Ilchenko int rc; 275363b72657SIvan Ilchenko 275463b72657SIvan Ilchenko if (xstats_display_num == 0) 275563b72657SIvan Ilchenko return; 275663b72657SIvan Ilchenko 275763b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) { 275863b72657SIvan Ilchenko fill_xstats_display_info(); 275963b72657SIvan Ilchenko return; 276063b72657SIvan Ilchenko } 276163b72657SIvan Ilchenko 276263b72657SIvan Ilchenko port = &ports[pi]; 276363b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED) 276463b72657SIvan Ilchenko return; 276563b72657SIvan Ilchenko 276663b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) 276763b72657SIvan Ilchenko rte_exit(EXIT_FAILURE, 276863b72657SIvan Ilchenko "Failed to allocate xstats display memory\n"); 276963b72657SIvan Ilchenko 277063b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp; 277163b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { 277263b72657SIvan Ilchenko xstat_name = xstats_display[stat].name; 277363b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, 277463b72657SIvan Ilchenko ids_supp + stat_supp); 277563b72657SIvan Ilchenko if (rc != 0) { 277663b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", 277763b72657SIvan Ilchenko xstat_name, pi, stat); 277863b72657SIvan Ilchenko continue; 277963b72657SIvan Ilchenko } 278063b72657SIvan Ilchenko stat_supp++; 278163b72657SIvan Ilchenko } 278263b72657SIvan Ilchenko 278363b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp; 278463b72657SIvan Ilchenko } 278563b72657SIvan Ilchenko 278663b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. 
*/ 278763b72657SIvan Ilchenko static void 278863b72657SIvan Ilchenko fill_xstats_display_info(void) 278963b72657SIvan Ilchenko { 279063b72657SIvan Ilchenko portid_t pi; 279163b72657SIvan Ilchenko 279263b72657SIvan Ilchenko if (xstats_display_num == 0) 279363b72657SIvan Ilchenko return; 279463b72657SIvan Ilchenko 279563b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 279663b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 279763b72657SIvan Ilchenko } 279863b72657SIvan Ilchenko 2799edab33b1STetsuya Mukawa int 2800ce8d5614SIntel start_port(portid_t pid) 2801ce8d5614SIntel { 280292d2703eSMichael Qiu int diag, need_check_link_status = -1; 2803ce8d5614SIntel portid_t pi; 280401817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 280501817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 280601817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 280701817b10SBing Zhao uint16_t cnt_pi = 0; 280801817b10SBing Zhao uint16_t cfg_pi = 0; 280901817b10SBing Zhao int peer_pi; 2810ce8d5614SIntel queueid_t qi; 2811ce8d5614SIntel struct rte_port *port; 28121c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2813ce8d5614SIntel 28144468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 28154468635fSMichael Qiu return 0; 28164468635fSMichael Qiu 28177d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2818edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2819ce8d5614SIntel continue; 2820ce8d5614SIntel 2821d8c079a5SMin Hu (Connor) if (port_is_bonding_slave(pi)) { 2822d8c079a5SMin Hu (Connor) fprintf(stderr, 2823d8c079a5SMin Hu (Connor) "Please remove port %d from bonded device.\n", 2824d8c079a5SMin Hu (Connor) pi); 2825d8c079a5SMin Hu (Connor) continue; 2826d8c079a5SMin Hu (Connor) } 2827d8c079a5SMin Hu (Connor) 282892d2703eSMichael Qiu need_check_link_status = 0; 2829ce8d5614SIntel port = &ports[pi]; 2830eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STOPPED) 2831eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 2832eac341d3SJoyce Kong else { 283361a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2834ce8d5614SIntel continue; 2835ce8d5614SIntel } 2836ce8d5614SIntel 2837ce8d5614SIntel if (port->need_reconfig > 0) { 2838655eae01SJie Wang struct rte_eth_conf dev_conf; 2839655eae01SJie Wang int k; 2840655eae01SJie Wang 2841ce8d5614SIntel port->need_reconfig = 0; 2842ce8d5614SIntel 28437ee3e944SVasily Philipov if (flow_isolate_all) { 28447ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 28457ee3e944SVasily Philipov if (ret) { 284661a3b0e5SAndrew Rybchenko fprintf(stderr, 284761a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 284861a3b0e5SAndrew Rybchenko pi); 28497ee3e944SVasily Philipov return -1; 28507ee3e944SVasily Philipov } 28517ee3e944SVasily Philipov } 2852b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 28535706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 285420a0286fSLiu Xiaofeng port->socket_id); 28551c69df45SOri Kam if (nb_hairpinq > 0 && 28561c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 285761a3b0e5SAndrew Rybchenko fprintf(stderr, 285861a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 285961a3b0e5SAndrew Rybchenko pi); 28601c69df45SOri Kam return -1; 28611c69df45SOri Kam } 28621bb4a528SFerruh Yigit 2863ce8d5614SIntel /* configure port */ 2864a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 28651c69df45SOri Kam nb_txq + nb_hairpinq, 2866ce8d5614SIntel &(port->dev_conf)); 2867ce8d5614SIntel if (diag != 0) { 
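/*
 * Editor's note: every failure path below follows the same rollback
 * pattern - the port is moved from RTE_PORT_HANDLING back to
 * RTE_PORT_STOPPED and need_reconfig (or need_reconfig_queues) is set
 * again, so the next start_port() call retries the whole sequence.
 */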
2868eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2869eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2870eac341d3SJoyce Kong else 287161a3b0e5SAndrew Rybchenko fprintf(stderr, 287261a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 287361a3b0e5SAndrew Rybchenko pi); 287461a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 287561a3b0e5SAndrew Rybchenko pi); 2876ce8d5614SIntel /* try to reconfigure port next time */ 2877ce8d5614SIntel port->need_reconfig = 1; 2878148f963fSBruce Richardson return -1; 2879ce8d5614SIntel } 2880655eae01SJie Wang /* get device configuration*/ 2881655eae01SJie Wang if (0 != 2882655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 2883655eae01SJie Wang fprintf(stderr, 2884655eae01SJie Wang "port %d can not get device configuration\n", 2885655eae01SJie Wang pi); 2886655eae01SJie Wang return -1; 2887655eae01SJie Wang } 2888655eae01SJie Wang /* Apply Rx offloads configuration */ 2889655eae01SJie Wang if (dev_conf.rxmode.offloads != 2890655eae01SJie Wang port->dev_conf.rxmode.offloads) { 2891655eae01SJie Wang port->dev_conf.rxmode.offloads |= 2892655eae01SJie Wang dev_conf.rxmode.offloads; 2893655eae01SJie Wang for (k = 0; 2894655eae01SJie Wang k < port->dev_info.max_rx_queues; 2895655eae01SJie Wang k++) 28963c4426dbSDmitry Kozlyuk port->rxq[k].conf.offloads |= 2897655eae01SJie Wang dev_conf.rxmode.offloads; 2898655eae01SJie Wang } 2899655eae01SJie Wang /* Apply Tx offloads configuration */ 2900655eae01SJie Wang if (dev_conf.txmode.offloads != 2901655eae01SJie Wang port->dev_conf.txmode.offloads) { 2902655eae01SJie Wang port->dev_conf.txmode.offloads |= 2903655eae01SJie Wang dev_conf.txmode.offloads; 2904655eae01SJie Wang for (k = 0; 2905655eae01SJie Wang k < port->dev_info.max_tx_queues; 2906655eae01SJie Wang k++) 29073c4426dbSDmitry Kozlyuk port->txq[k].conf.offloads |= 2908655eae01SJie Wang dev_conf.txmode.offloads; 2909655eae01SJie Wang } 2910ce8d5614SIntel } 2911a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 2912ce8d5614SIntel port->need_reconfig_queues = 0; 2913ce8d5614SIntel /* setup tx queues */ 2914ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 29153c4426dbSDmitry Kozlyuk struct rte_eth_txconf *conf = 29163c4426dbSDmitry Kozlyuk &port->txq[qi].conf; 29173c4426dbSDmitry Kozlyuk 2918b6ea6408SIntel if ((numa_support) && 2919b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2920b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2921d44f8a48SQi Zhang port->nb_tx_desc[qi], 2922d44f8a48SQi Zhang txring_numa[pi], 29233c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2924b6ea6408SIntel else 2925b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2926d44f8a48SQi Zhang port->nb_tx_desc[qi], 2927d44f8a48SQi Zhang port->socket_id, 29283c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2929b6ea6408SIntel 29303c4426dbSDmitry Kozlyuk if (diag == 0) { 29313c4426dbSDmitry Kozlyuk port->txq[qi].state = 29323c4426dbSDmitry Kozlyuk conf->tx_deferred_start ? 
29333c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 29343c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 2935ce8d5614SIntel continue; 29363c4426dbSDmitry Kozlyuk } 2937ce8d5614SIntel 2938ce8d5614SIntel /* Fail to setup tx queue, return */ 2939eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2940eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2941eac341d3SJoyce Kong else 294261a3b0e5SAndrew Rybchenko fprintf(stderr, 294361a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 294461a3b0e5SAndrew Rybchenko pi); 294561a3b0e5SAndrew Rybchenko fprintf(stderr, 294661a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 2947d44f8a48SQi Zhang pi); 2948ce8d5614SIntel /* try to reconfigure queues next time */ 2949ce8d5614SIntel port->need_reconfig_queues = 1; 2950148f963fSBruce Richardson return -1; 2951ce8d5614SIntel } 2952ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2953d44f8a48SQi Zhang /* setup rx queues */ 2954b6ea6408SIntel if ((numa_support) && 2955b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2956b6ea6408SIntel struct rte_mempool * mp = 295726cbb419SViacheslav Ovsiienko mbuf_pool_find 295826cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2959b6ea6408SIntel if (mp == NULL) { 296061a3b0e5SAndrew Rybchenko fprintf(stderr, 296161a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 2962b6ea6408SIntel rxring_numa[pi]); 2963148f963fSBruce Richardson return -1; 2964b6ea6408SIntel } 2965b6ea6408SIntel 29662befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2967d4930794SFerruh Yigit port->nb_rx_desc[qi], 2968d44f8a48SQi Zhang rxring_numa[pi], 29693c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2970d44f8a48SQi Zhang mp); 29711e1d6bddSBernard Iremonger } else { 29721e1d6bddSBernard Iremonger struct rte_mempool *mp = 297326cbb419SViacheslav Ovsiienko mbuf_pool_find 297426cbb419SViacheslav Ovsiienko (port->socket_id, 0); 29751e1d6bddSBernard Iremonger if (mp == NULL) { 297661a3b0e5SAndrew Rybchenko fprintf(stderr, 297761a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 29781e1d6bddSBernard Iremonger port->socket_id); 29791e1d6bddSBernard Iremonger return -1; 2980b6ea6408SIntel } 29812befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2982d4930794SFerruh Yigit port->nb_rx_desc[qi], 2983d44f8a48SQi Zhang port->socket_id, 29843c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2985d44f8a48SQi Zhang mp); 29861e1d6bddSBernard Iremonger } 2987ce8d5614SIntel if (diag == 0) 2988ce8d5614SIntel continue; 2989ce8d5614SIntel 2990ce8d5614SIntel /* Fail to setup rx queue, return */ 2991eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2992eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2993eac341d3SJoyce Kong else 299461a3b0e5SAndrew Rybchenko fprintf(stderr, 299561a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 299661a3b0e5SAndrew Rybchenko pi); 299761a3b0e5SAndrew Rybchenko fprintf(stderr, 299861a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 2999d44f8a48SQi Zhang pi); 3000ce8d5614SIntel /* try to reconfigure queues next time */ 3001ce8d5614SIntel port->need_reconfig_queues = 1; 3002148f963fSBruce Richardson return -1; 3003ce8d5614SIntel } 30041c69df45SOri Kam /* setup hairpin queues */ 300501817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 30061c69df45SOri Kam return -1; 3007ce8d5614SIntel } 3008b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 
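/*
 * Editor's note: stripped of testpmd's bookkeeping, the bring-up order
 * implemented by start_port() is the canonical ethdev sequence:
 * configure the device, set up each Rx/Tx queue, then start. A minimal
 * sketch with one queue per direction and default configs; the function
 * name is hypothetical and mp is assumed to be an existing mbuf pool.
 */
static int
sketch_port_bring_up(uint16_t pid, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf)); /* driver defaults suffice here */
	if (rte_eth_dev_configure(pid, 1, 1, &conf) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL, mp) < 0)
		return -1;
	if (rte_eth_tx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL) < 0)
		return -1;
	return rte_eth_dev_start(pid); /* then poll link status as below */
}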
3009b0a9354aSPavan Nikhilesh if (clear_ptypes) { 3010b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 3011b0a9354aSPavan Nikhilesh NULL, 0); 3012b0a9354aSPavan Nikhilesh if (diag < 0) 301361a3b0e5SAndrew Rybchenko fprintf(stderr, 3014b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 3015b0a9354aSPavan Nikhilesh pi); 3016b0a9354aSPavan Nikhilesh } 3017b0a9354aSPavan Nikhilesh 301801817b10SBing Zhao p_pi = pi; 301901817b10SBing Zhao cnt_pi++; 302001817b10SBing Zhao 3021ce8d5614SIntel /* start port */ 3022a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi); 302352f2c6f2SAndrew Rybchenko if (diag < 0) { 302461a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to start port %d: %s\n", 302561a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag)); 3026ce8d5614SIntel 3027ce8d5614SIntel /* Fail to setup rx queue, return */ 3028eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3029eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3030eac341d3SJoyce Kong else 303161a3b0e5SAndrew Rybchenko fprintf(stderr, 303261a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 303361a3b0e5SAndrew Rybchenko pi); 3034ce8d5614SIntel continue; 3035ce8d5614SIntel } 3036ce8d5614SIntel 3037eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3038eac341d3SJoyce Kong port->port_status = RTE_PORT_STARTED; 3039eac341d3SJoyce Kong else 304061a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into started\n", 304161a3b0e5SAndrew Rybchenko pi); 3042ce8d5614SIntel 30435ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0) 3044c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, 3045a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr)); 3046d8c89163SZijie Pan 3047ce8d5614SIntel /* at least one port started, need checking link status */ 3048ce8d5614SIntel need_check_link_status = 1; 304901817b10SBing Zhao 305001817b10SBing Zhao pl[cfg_pi++] = pi; 3051ce8d5614SIntel } 3052ce8d5614SIntel 305392d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 3054edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 305592d2703eSMichael Qiu else if (need_check_link_status == 0) 305661a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n"); 3057ce8d5614SIntel 305801817b10SBing Zhao if (hairpin_mode & 0xf) { 305901817b10SBing Zhao uint16_t i; 306001817b10SBing Zhao int j; 306101817b10SBing Zhao 306201817b10SBing Zhao /* bind all started hairpin ports */ 306301817b10SBing Zhao for (i = 0; i < cfg_pi; i++) { 306401817b10SBing Zhao pi = pl[i]; 306501817b10SBing Zhao /* bind current Tx to all peer Rx */ 306601817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 306701817b10SBing Zhao RTE_MAX_ETHPORTS, 1); 306801817b10SBing Zhao if (peer_pi < 0) 306901817b10SBing Zhao return peer_pi; 307001817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 307101817b10SBing Zhao if (!port_is_started(peer_pl[j])) 307201817b10SBing Zhao continue; 307301817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 307401817b10SBing Zhao if (diag < 0) { 307561a3b0e5SAndrew Rybchenko fprintf(stderr, 307661a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 307701817b10SBing Zhao pi, peer_pl[j], 307801817b10SBing Zhao rte_strerror(-diag)); 307901817b10SBing Zhao return -1; 308001817b10SBing Zhao } 308101817b10SBing Zhao } 308201817b10SBing Zhao /* bind all peer Tx to current Rx */ 308301817b10SBing Zhao peer_pi = 
rte_eth_hairpin_get_peer_ports(pi, peer_pl, 308401817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 308501817b10SBing Zhao if (peer_pi < 0) 308601817b10SBing Zhao return peer_pi; 308701817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 308801817b10SBing Zhao if (!port_is_started(peer_pl[j])) 308901817b10SBing Zhao continue; 309001817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 309101817b10SBing Zhao if (diag < 0) { 309261a3b0e5SAndrew Rybchenko fprintf(stderr, 309361a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 309401817b10SBing Zhao peer_pl[j], pi, 309501817b10SBing Zhao rte_strerror(-diag)); 309601817b10SBing Zhao return -1; 309701817b10SBing Zhao } 309801817b10SBing Zhao } 309901817b10SBing Zhao } 310001817b10SBing Zhao } 310101817b10SBing Zhao 310263b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 310363b72657SIvan Ilchenko 3104ce8d5614SIntel printf("Done\n"); 3105148f963fSBruce Richardson return 0; 3106ce8d5614SIntel } 3107ce8d5614SIntel 3108ce8d5614SIntel void 3109ce8d5614SIntel stop_port(portid_t pid) 3110ce8d5614SIntel { 3111ce8d5614SIntel portid_t pi; 3112ce8d5614SIntel struct rte_port *port; 3113ce8d5614SIntel int need_check_link_status = 0; 311401817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 311501817b10SBing Zhao int peer_pi; 3116ce8d5614SIntel 31174468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 31184468635fSMichael Qiu return; 31194468635fSMichael Qiu 3120ce8d5614SIntel printf("Stopping ports...\n"); 3121ce8d5614SIntel 31227d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 31234468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3124ce8d5614SIntel continue; 3125ce8d5614SIntel 3126a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 312761a3b0e5SAndrew Rybchenko fprintf(stderr, 312861a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 312961a3b0e5SAndrew Rybchenko pi); 3130a8ef3e3aSBernard Iremonger continue; 3131a8ef3e3aSBernard Iremonger } 3132a8ef3e3aSBernard Iremonger 31330e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 313461a3b0e5SAndrew Rybchenko fprintf(stderr, 313561a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 313661a3b0e5SAndrew Rybchenko pi); 31370e545d30SBernard Iremonger continue; 31380e545d30SBernard Iremonger } 31390e545d30SBernard Iremonger 3140ce8d5614SIntel port = &ports[pi]; 3141eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3142eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3143eac341d3SJoyce Kong else 3144ce8d5614SIntel continue; 3145ce8d5614SIntel 314601817b10SBing Zhao if (hairpin_mode & 0xf) { 314701817b10SBing Zhao int j; 314801817b10SBing Zhao 314901817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 315001817b10SBing Zhao /* unbind all peer Tx from current Rx */ 315101817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 315201817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 315301817b10SBing Zhao if (peer_pi < 0) 315401817b10SBing Zhao continue; 315501817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 315601817b10SBing Zhao if (!port_is_started(peer_pl[j])) 315701817b10SBing Zhao continue; 315801817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 315901817b10SBing Zhao } 316001817b10SBing Zhao } 316101817b10SBing Zhao 31620f93edbfSGregory Etelson if (port->flow_list) 31630f93edbfSGregory Etelson port_flow_flush(pi); 31640f93edbfSGregory Etelson 3165a550baf2SMin Hu (Connor) if (eth_dev_stop_mp(pi) != 0) 3166e62c5a12SIvan Ilchenko 
RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3167e62c5a12SIvan Ilchenko pi); 3168ce8d5614SIntel 3169eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3170eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3171eac341d3SJoyce Kong else 317261a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 317361a3b0e5SAndrew Rybchenko pi); 3174ce8d5614SIntel need_check_link_status = 1; 3175ce8d5614SIntel } 3176bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3177edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3178ce8d5614SIntel 3179ce8d5614SIntel printf("Done\n"); 3180ce8d5614SIntel } 3181ce8d5614SIntel 3182ce6959bfSWisam Jaddo static void 31834f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3184ce6959bfSWisam Jaddo { 31854f1de450SThomas Monjalon portid_t i; 31864f1de450SThomas Monjalon portid_t new_total = 0; 3187ce6959bfSWisam Jaddo 31884f1de450SThomas Monjalon for (i = 0; i < *total; i++) 31894f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 31904f1de450SThomas Monjalon array[new_total] = array[i]; 31914f1de450SThomas Monjalon new_total++; 3192ce6959bfSWisam Jaddo } 31934f1de450SThomas Monjalon *total = new_total; 31944f1de450SThomas Monjalon } 31954f1de450SThomas Monjalon 31964f1de450SThomas Monjalon static void 31974f1de450SThomas Monjalon remove_invalid_ports(void) 31984f1de450SThomas Monjalon { 31994f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 32004f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 32014f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3202ce6959bfSWisam Jaddo } 3203ce6959bfSWisam Jaddo 3204ce8d5614SIntel void 3205ce8d5614SIntel close_port(portid_t pid) 3206ce8d5614SIntel { 3207ce8d5614SIntel portid_t pi; 3208ce8d5614SIntel struct rte_port *port; 3209ce8d5614SIntel 32104468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 32114468635fSMichael Qiu return; 32124468635fSMichael Qiu 3213ce8d5614SIntel printf("Closing ports...\n"); 3214ce8d5614SIntel 32157d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 32164468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3217ce8d5614SIntel continue; 3218ce8d5614SIntel 3219a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 322061a3b0e5SAndrew Rybchenko fprintf(stderr, 322161a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 322261a3b0e5SAndrew Rybchenko pi); 3223a8ef3e3aSBernard Iremonger continue; 3224a8ef3e3aSBernard Iremonger } 3225a8ef3e3aSBernard Iremonger 32260e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 322761a3b0e5SAndrew Rybchenko fprintf(stderr, 322861a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 322961a3b0e5SAndrew Rybchenko pi); 32300e545d30SBernard Iremonger continue; 32310e545d30SBernard Iremonger } 32320e545d30SBernard Iremonger 3233ce8d5614SIntel port = &ports[pi]; 3234eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 323561a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3236d4e8ad64SMichael Qiu continue; 3237d4e8ad64SMichael Qiu } 3238d4e8ad64SMichael Qiu 3239a550baf2SMin Hu (Connor) if (is_proc_primary()) { 3240938a184aSAdrien Mazarguil port_flow_flush(pi); 324159f3a8acSGregory Etelson port_flex_item_flush(pi); 3242f7352c17SDmitry Kozlyuk port_action_handle_flush(pi); 3243ce8d5614SIntel rte_eth_dev_close(pi); 3244ce8d5614SIntel } 324563b72657SIvan 
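/*
 * Editor's note: flow rules, flex items and indirect action handles
 * are flushed before rte_eth_dev_close(), and only in the primary
 * process - secondary processes must not release device state they
 * share with the primary.
 */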
Ilchenko 324663b72657SIvan Ilchenko free_xstats_display_info(pi); 3247a550baf2SMin Hu (Connor) } 3248ce8d5614SIntel 324985c6571cSThomas Monjalon remove_invalid_ports(); 3250ce8d5614SIntel printf("Done\n"); 3251ce8d5614SIntel } 3252ce8d5614SIntel 3253edab33b1STetsuya Mukawa void 325497f1e196SWei Dai reset_port(portid_t pid) 325597f1e196SWei Dai { 325697f1e196SWei Dai int diag; 325797f1e196SWei Dai portid_t pi; 325897f1e196SWei Dai struct rte_port *port; 325997f1e196SWei Dai 326097f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 326197f1e196SWei Dai return; 326297f1e196SWei Dai 32631cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 32641cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 326561a3b0e5SAndrew Rybchenko fprintf(stderr, 326661a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 32671cde1b9aSShougang Wang return; 32681cde1b9aSShougang Wang } 32691cde1b9aSShougang Wang 327097f1e196SWei Dai printf("Resetting ports...\n"); 327197f1e196SWei Dai 327297f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 327397f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 327497f1e196SWei Dai continue; 327597f1e196SWei Dai 327697f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 327761a3b0e5SAndrew Rybchenko fprintf(stderr, 327861a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 327961a3b0e5SAndrew Rybchenko pi); 328097f1e196SWei Dai continue; 328197f1e196SWei Dai } 328297f1e196SWei Dai 328397f1e196SWei Dai if (port_is_bonding_slave(pi)) { 328461a3b0e5SAndrew Rybchenko fprintf(stderr, 328561a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 328697f1e196SWei Dai pi); 328797f1e196SWei Dai continue; 328897f1e196SWei Dai } 328997f1e196SWei Dai 329097f1e196SWei Dai diag = rte_eth_dev_reset(pi); 329197f1e196SWei Dai if (diag == 0) { 329297f1e196SWei Dai port = &ports[pi]; 329397f1e196SWei Dai port->need_reconfig = 1; 329497f1e196SWei Dai port->need_reconfig_queues = 1; 329597f1e196SWei Dai } else { 329661a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. 
diag=%d\n", 329761a3b0e5SAndrew Rybchenko pi, diag); 329897f1e196SWei Dai } 329997f1e196SWei Dai } 330097f1e196SWei Dai 330197f1e196SWei Dai printf("Done\n"); 330297f1e196SWei Dai } 330397f1e196SWei Dai 330497f1e196SWei Dai void 3305edab33b1STetsuya Mukawa attach_port(char *identifier) 3306ce8d5614SIntel { 33074f1ed78eSThomas Monjalon portid_t pi; 3308c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3309ce8d5614SIntel 3310edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3311edab33b1STetsuya Mukawa 3312edab33b1STetsuya Mukawa if (identifier == NULL) { 331361a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3314edab33b1STetsuya Mukawa return; 3315ce8d5614SIntel } 3316ce8d5614SIntel 331775b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3318c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3319edab33b1STetsuya Mukawa return; 3320c9cce428SThomas Monjalon } 3321c9cce428SThomas Monjalon 33224f1ed78eSThomas Monjalon /* first attach mode: event */ 33234f1ed78eSThomas Monjalon if (setup_on_probe_event) { 33244f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 33254f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 33264f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 33274f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 33284f1ed78eSThomas Monjalon setup_attached_port(pi); 33294f1ed78eSThomas Monjalon return; 33304f1ed78eSThomas Monjalon } 33314f1ed78eSThomas Monjalon 33324f1ed78eSThomas Monjalon /* second attach mode: iterator */ 333386fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 33344f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 333586fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 333686fa5de1SThomas Monjalon continue; /* port was already attached before */ 3337c9cce428SThomas Monjalon setup_attached_port(pi); 3338c9cce428SThomas Monjalon } 333986fa5de1SThomas Monjalon } 3340c9cce428SThomas Monjalon 3341c9cce428SThomas Monjalon static void 3342c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3343c9cce428SThomas Monjalon { 3344c9cce428SThomas Monjalon unsigned int socket_id; 334534fc1051SIvan Ilchenko int ret; 3346edab33b1STetsuya Mukawa 3347931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 334829841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 3349931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 335029841336SPhil Yang socket_id = socket_ids[0]; 3351931126baSBernard Iremonger reconfig(pi, socket_id); 335234fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 335334fc1051SIvan Ilchenko if (ret != 0) 335461a3b0e5SAndrew Rybchenko fprintf(stderr, 335561a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 335634fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3357edab33b1STetsuya Mukawa 33584f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 33594f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 33604f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 33614f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3362edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3363edab33b1STetsuya Mukawa 3364edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 3365edab33b1STetsuya Mukawa printf("Done\n"); 3366edab33b1STetsuya Mukawa } 3367edab33b1STetsuya Mukawa 33680654d4a8SThomas Monjalon static void 33690654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 33705f4ec54fSChen Jing D(Mark) { 3371f8e5baa2SThomas Monjalon portid_t sibling; 3372f8e5baa2SThomas Monjalon 3373f8e5baa2SThomas Monjalon if (dev == NULL) { 337461a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3375f8e5baa2SThomas Monjalon return; 3376f8e5baa2SThomas Monjalon } 3377f8e5baa2SThomas Monjalon 33780654d4a8SThomas Monjalon printf("Removing a device...\n"); 3379938a184aSAdrien Mazarguil 33802a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 33812a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 33822a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 338361a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 338461a3b0e5SAndrew Rybchenko sibling); 33852a449871SThomas Monjalon return; 33862a449871SThomas Monjalon } 33872a449871SThomas Monjalon port_flow_flush(sibling); 33882a449871SThomas Monjalon } 33892a449871SThomas Monjalon } 33902a449871SThomas Monjalon 339175b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3392f8e5baa2SThomas Monjalon TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); 3393edab33b1STetsuya Mukawa return; 33943070419eSGaetan Rivet } 33954f1de450SThomas Monjalon remove_invalid_ports(); 339603ce2c53SMatan Azrad 33970654d4a8SThomas Monjalon printf("Device is detached\n"); 3398f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3399edab33b1STetsuya Mukawa printf("Done\n"); 3400edab33b1STetsuya Mukawa return; 34015f4ec54fSChen Jing D(Mark) } 34025f4ec54fSChen Jing D(Mark) 3403af75078fSIntel void 34040654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 34050654d4a8SThomas Monjalon { 34060a0821bcSPaulis Gributs int ret; 34070a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 34080a0821bcSPaulis Gributs 34090654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 34100654d4a8SThomas Monjalon return; 34110654d4a8SThomas Monjalon 34120654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 34130654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 341461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 34150654d4a8SThomas Monjalon return; 34160654d4a8SThomas Monjalon } 341761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 34180654d4a8SThomas Monjalon } 34190654d4a8SThomas Monjalon 34200a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 34210a0821bcSPaulis Gributs if (ret != 0) { 34220a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 34230a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 34240a0821bcSPaulis Gributs port_id); 34250a0821bcSPaulis Gributs return; 34260a0821bcSPaulis Gributs } 34270a0821bcSPaulis Gributs detach_device(dev_info.device); 34280654d4a8SThomas Monjalon } 34290654d4a8SThomas Monjalon 34300654d4a8SThomas Monjalon void 34315edee5f6SThomas Monjalon detach_devargs(char *identifier) 343255e51c96SNithin Dabilpuram { 343355e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 343455e51c96SNithin Dabilpuram struct rte_devargs da; 343555e51c96SNithin Dabilpuram portid_t port_id; 343655e51c96SNithin Dabilpuram 343755e51c96SNithin Dabilpuram printf("Removing a device...\n"); 343855e51c96SNithin Dabilpuram 343955e51c96SNithin 
Dabilpuram memset(&da, 0, sizeof(da)); 344055e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 344161a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 344255e51c96SNithin Dabilpuram return; 344355e51c96SNithin Dabilpuram } 344455e51c96SNithin Dabilpuram 344555e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 344655e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 344755e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 344861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 344961a3b0e5SAndrew Rybchenko port_id); 3450149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 345164051bb1SXueming Li rte_devargs_reset(&da); 345255e51c96SNithin Dabilpuram return; 345355e51c96SNithin Dabilpuram } 345455e51c96SNithin Dabilpuram port_flow_flush(port_id); 345555e51c96SNithin Dabilpuram } 345655e51c96SNithin Dabilpuram } 345755e51c96SNithin Dabilpuram 345855e51c96SNithin Dabilpuram if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { 345955e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 346055e51c96SNithin Dabilpuram da.name, da.bus->name); 346164051bb1SXueming Li rte_devargs_reset(&da); 346255e51c96SNithin Dabilpuram return; 346355e51c96SNithin Dabilpuram } 346455e51c96SNithin Dabilpuram 346555e51c96SNithin Dabilpuram remove_invalid_ports(); 346655e51c96SNithin Dabilpuram 346755e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 346855e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 346955e51c96SNithin Dabilpuram printf("Done\n"); 347064051bb1SXueming Li rte_devargs_reset(&da); 347155e51c96SNithin Dabilpuram } 347255e51c96SNithin Dabilpuram 347355e51c96SNithin Dabilpuram void 3474af75078fSIntel pmd_test_exit(void) 3475af75078fSIntel { 3476af75078fSIntel portid_t pt_id; 347726cbb419SViacheslav Ovsiienko unsigned int i; 3478fb73e096SJeff Guo int ret; 3479af75078fSIntel 34808210ec25SPablo de Lara if (test_done == 0) 34818210ec25SPablo de Lara stop_packet_forwarding(); 34828210ec25SPablo de Lara 3483761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 348426cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 34853a0968c8SShahaf Shuler if (mempools[i]) { 34863a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 34873a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 34883a0968c8SShahaf Shuler NULL); 34893a0968c8SShahaf Shuler } 34903a0968c8SShahaf Shuler } 3491761f7ae1SJie Zhou #endif 3492d3a274ceSZhihong Wang if (ports != NULL) { 3493d3a274ceSZhihong Wang no_link_check = 1; 34947d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 349508fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3496af75078fSIntel fflush(stdout); 3497d3a274ceSZhihong Wang stop_port(pt_id); 349808fd782bSCristian Dumitrescu } 349908fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 350008fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 350108fd782bSCristian Dumitrescu fflush(stdout); 3502d3a274ceSZhihong Wang close_port(pt_id); 3503af75078fSIntel } 3504d3a274ceSZhihong Wang } 3505fb73e096SJeff Guo 3506fb73e096SJeff Guo if (hot_plug) { 3507fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 35082049c511SJeff Guo if (ret) { 3509fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3510fb73e096SJeff Guo "fail to stop device event monitor."); 35112049c511SJeff Guo return; 35122049c511SJeff Guo } 3513fb73e096SJeff Guo 35142049c511SJeff 
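/*
 * Editor's note: hotplug teardown runs in reverse of its setup order -
 * stop the event monitor, unregister the callback, then disable
 * hotplug handling - and each step is checked so that a failure
 * aborts the remaining cleanup.
 */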
Guo ret = rte_dev_event_callback_unregister(NULL, 3515cc1bf307SJeff Guo dev_event_callback, NULL); 35162049c511SJeff Guo if (ret < 0) { 3517fb73e096SJeff Guo RTE_LOG(ERR, EAL, 35182049c511SJeff Guo "fail to unregister device event callback.\n"); 35192049c511SJeff Guo return; 35202049c511SJeff Guo } 35212049c511SJeff Guo 35222049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 35232049c511SJeff Guo if (ret) { 35242049c511SJeff Guo RTE_LOG(ERR, EAL, 35252049c511SJeff Guo "fail to disable hotplug handling.\n"); 35262049c511SJeff Guo return; 35272049c511SJeff Guo } 3528fb73e096SJeff Guo } 352926cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3530401b744dSShahaf Shuler if (mempools[i]) 3531a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3532401b744dSShahaf Shuler } 353363b72657SIvan Ilchenko free(xstats_display); 3534fb73e096SJeff Guo 3535d3a274ceSZhihong Wang printf("\nBye...\n"); 3536af75078fSIntel } 3537af75078fSIntel 3538af75078fSIntel typedef void (*cmd_func_t)(void); 3539af75078fSIntel struct pmd_test_command { 3540af75078fSIntel const char *cmd_name; 3541af75078fSIntel cmd_func_t cmd_func; 3542af75078fSIntel }; 3543af75078fSIntel 3544ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3545af75078fSIntel static void 3546edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3547af75078fSIntel { 3548ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3549ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3550f8244c63SZhiyong Yang portid_t portid; 3551f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3552ce8d5614SIntel struct rte_eth_link link; 3553e661a08bSIgor Romanov int ret; 3554ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3555ce8d5614SIntel 3556ce8d5614SIntel printf("Checking link statuses...\n"); 3557ce8d5614SIntel fflush(stdout); 3558ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3559ce8d5614SIntel all_ports_up = 1; 35607d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3561ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3562ce8d5614SIntel continue; 3563ce8d5614SIntel memset(&link, 0, sizeof(link)); 3564e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3565e661a08bSIgor Romanov if (ret < 0) { 3566e661a08bSIgor Romanov all_ports_up = 0; 3567e661a08bSIgor Romanov if (print_flag == 1) 356861a3b0e5SAndrew Rybchenko fprintf(stderr, 356961a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3570e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3571e661a08bSIgor Romanov continue; 3572e661a08bSIgor Romanov } 3573ce8d5614SIntel /* print link status if flag set */ 3574ce8d5614SIntel if (print_flag == 1) { 3575ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3576ba5509a6SIvan Dyukov sizeof(link_status), &link); 3577ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3578ce8d5614SIntel continue; 3579ce8d5614SIntel } 3580ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3581295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3582ce8d5614SIntel all_ports_up = 0; 3583ce8d5614SIntel break; 3584ce8d5614SIntel } 3585ce8d5614SIntel } 3586ce8d5614SIntel /* after finally printing all link status, get out */ 3587ce8d5614SIntel if (print_flag == 1) 3588ce8d5614SIntel break; 3589ce8d5614SIntel 3590ce8d5614SIntel if (all_ports_up == 0) { 3591ce8d5614SIntel fflush(stdout); 3592ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 3593ce8d5614SIntel } 3594ce8d5614SIntel 
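/*
 * Editor's note: print_flag is set on success or on the last polling
 * iteration, which triggers one final pass that prints every port's
 * status via rte_eth_link_to_str() before the loop exits.
 */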
3595ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3596ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3597ce8d5614SIntel print_flag = 1; 3598ce8d5614SIntel } 35998ea656f8SGaetan Rivet 36008ea656f8SGaetan Rivet if (lsc_interrupt) 36018ea656f8SGaetan Rivet break; 3602ce8d5614SIntel } 3603af75078fSIntel } 3604af75078fSIntel 3605284c908cSGaetan Rivet static void 3606cc1bf307SJeff Guo rmv_port_callback(void *arg) 3607284c908cSGaetan Rivet { 36083b97888aSMatan Azrad int need_to_start = 0; 36090da2a62bSMatan Azrad int org_no_link_check = no_link_check; 361028caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 36110a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 36120a0821bcSPaulis Gributs int ret; 3613284c908cSGaetan Rivet 3614284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3615284c908cSGaetan Rivet 36163b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 36173b97888aSMatan Azrad need_to_start = 1; 36183b97888aSMatan Azrad stop_packet_forwarding(); 36193b97888aSMatan Azrad } 36200da2a62bSMatan Azrad no_link_check = 1; 3621284c908cSGaetan Rivet stop_port(port_id); 36220da2a62bSMatan Azrad no_link_check = org_no_link_check; 36230654d4a8SThomas Monjalon 36240a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 36250a0821bcSPaulis Gributs if (ret != 0) 36260a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 36270a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 36280a0821bcSPaulis Gributs port_id); 3629e1d38504SPaulis Gributs else { 3630e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3631e1d38504SPaulis Gributs close_port(port_id); 3632e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3633e1d38504SPaulis Gributs } 36343b97888aSMatan Azrad if (need_to_start) 36353b97888aSMatan Azrad start_packet_forwarding(0); 3636284c908cSGaetan Rivet } 3637284c908cSGaetan Rivet 363876ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3639d6af1a13SBernard Iremonger static int 3640f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3641d6af1a13SBernard Iremonger void *ret_param) 364276ad4a2dSGaetan Rivet { 364376ad4a2dSGaetan Rivet RTE_SET_USED(param); 3644d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 364576ad4a2dSGaetan Rivet 364676ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 364761a3b0e5SAndrew Rybchenko fprintf(stderr, 364861a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 364976ad4a2dSGaetan Rivet port_id, __func__, type); 365076ad4a2dSGaetan Rivet fflush(stderr); 36513af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3652f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 365397b5d8b5SThomas Monjalon eth_event_desc[type]); 365476ad4a2dSGaetan Rivet fflush(stdout); 365576ad4a2dSGaetan Rivet } 3656284c908cSGaetan Rivet 3657284c908cSGaetan Rivet switch (type) { 36584f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 36594f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 36604f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 36614f1ed78eSThomas Monjalon break; 3662284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 36634f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 36644f1ed78eSThomas Monjalon break; 3665284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3666cc1bf307SJeff Guo rmv_port_callback, (void 
*)(intptr_t)port_id)) 366761a3b0e5SAndrew Rybchenko fprintf(stderr, 366861a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3669284c908cSGaetan Rivet break; 367085c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 367185c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 367285c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 367385c6571cSThomas Monjalon break; 3674284c908cSGaetan Rivet default: 3675284c908cSGaetan Rivet break; 3676284c908cSGaetan Rivet } 3677d6af1a13SBernard Iremonger return 0; 367876ad4a2dSGaetan Rivet } 367976ad4a2dSGaetan Rivet 368097b5d8b5SThomas Monjalon static int 368197b5d8b5SThomas Monjalon register_eth_event_callback(void) 368297b5d8b5SThomas Monjalon { 368397b5d8b5SThomas Monjalon int ret; 368497b5d8b5SThomas Monjalon enum rte_eth_event_type event; 368597b5d8b5SThomas Monjalon 368697b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 368797b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 368897b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 368997b5d8b5SThomas Monjalon event, 369097b5d8b5SThomas Monjalon eth_event_callback, 369197b5d8b5SThomas Monjalon NULL); 369297b5d8b5SThomas Monjalon if (ret != 0) { 369397b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 369497b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 369597b5d8b5SThomas Monjalon return -1; 369697b5d8b5SThomas Monjalon } 369797b5d8b5SThomas Monjalon } 369897b5d8b5SThomas Monjalon 369997b5d8b5SThomas Monjalon return 0; 370097b5d8b5SThomas Monjalon } 370197b5d8b5SThomas Monjalon 3702fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3703fb73e096SJeff Guo static void 3704cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3705fb73e096SJeff Guo __rte_unused void *arg) 3706fb73e096SJeff Guo { 37072049c511SJeff Guo uint16_t port_id; 37082049c511SJeff Guo int ret; 37092049c511SJeff Guo 3710fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3711fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3712fb73e096SJeff Guo __func__, type); 3713fb73e096SJeff Guo fflush(stderr); 3714fb73e096SJeff Guo } 3715fb73e096SJeff Guo 3716fb73e096SJeff Guo switch (type) { 3717fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3718cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3719fb73e096SJeff Guo device_name); 37202049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 37212049c511SJeff Guo if (ret) { 37222049c511SJeff Guo RTE_LOG(ERR, EAL, "cannot get port by device %s!\n", 37232049c511SJeff Guo device_name); 37242049c511SJeff Guo return; 37252049c511SJeff Guo } 3726cc1bf307SJeff Guo /* 3727cc1bf307SJeff Guo * Because the user's callback is invoked from the EAL interrupt 3728cc1bf307SJeff Guo * callback, the interrupt callback must return before the user 3729cc1bf307SJeff Guo * callback can be unregistered while detaching the device. So 3730cc1bf307SJeff Guo * return quickly here and detach the device through a deferred 3731cc1bf307SJeff Guo * removal instead. This is a workaround: once device detaching 3732cc1bf307SJeff Guo * is moved into the EAL in the future, the deferred 3733cc1bf307SJeff Guo * removal can be deleted.
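 *
 * Illustrative sketch of the deferred pattern (editorial addition,
 * not testpmd code; deferred_work() is a hypothetical helper):
 *
 *	static void deferred_work(void *arg)
 *	{
 *		uint16_t pid = (uint16_t)(uintptr_t)arg;
 *
 *		... slow teardown that may unregister callbacks ...
 *	}
 *
 *	ret = rte_eal_alarm_set(100000, deferred_work,
 *			(void *)(uintptr_t)pid);
 *
 * A still-pending alarm can be dropped again with
 * rte_eal_alarm_cancel(deferred_work, (void *)(uintptr_t)pid).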
3734cc1bf307SJeff Guo */ 3735cc1bf307SJeff Guo if (rte_eal_alarm_set(100000, 3736cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 3737cc1bf307SJeff Guo RTE_LOG(ERR, EAL, 3738cc1bf307SJeff Guo "Could not set up deferred device removal\n"); 3739fb73e096SJeff Guo break; 3740fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 3741fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 3742fb73e096SJeff Guo device_name); 3743fb73e096SJeff Guo /* TODO: after kernel driver binding finishes, 3744fb73e096SJeff Guo * begin to attach the port. 3745fb73e096SJeff Guo */ 3746fb73e096SJeff Guo break; 3747fb73e096SJeff Guo default: 3748fb73e096SJeff Guo break; 3749fb73e096SJeff Guo } 3750fb73e096SJeff Guo } 3751fb73e096SJeff Guo 3752f2c5125aSPablo de Lara static void 3753f4d178c1SXueming Li rxtx_port_config(portid_t pid) 3754f2c5125aSPablo de Lara { 3755d44f8a48SQi Zhang uint16_t qid; 37565e91aeefSWei Zhao uint64_t offloads; 3757f4d178c1SXueming Li struct rte_port *port = &ports[pid]; 3758f2c5125aSPablo de Lara 3759d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 37603c4426dbSDmitry Kozlyuk offloads = port->rxq[qid].conf.offloads; 37613c4426dbSDmitry Kozlyuk port->rxq[qid].conf = port->dev_info.default_rxconf; 3762f4d178c1SXueming Li 3763f4d178c1SXueming Li if (rxq_share > 0 && 3764f4d178c1SXueming Li (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) { 3765f4d178c1SXueming Li /* Non-zero share group to enable RxQ share. */ 37663c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_group = pid / rxq_share + 1; 37673c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */ 3768f4d178c1SXueming Li } 3769f4d178c1SXueming Li 3770575e0fd1SWei Zhao if (offloads != 0) 37713c4426dbSDmitry Kozlyuk port->rxq[qid].conf.offloads = offloads; 3772d44f8a48SQi Zhang 3773d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 3774f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 37753c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh; 3776f2c5125aSPablo de Lara 3777f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 37783c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh; 3779f2c5125aSPablo de Lara 3780f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 37813c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh; 3782f2c5125aSPablo de Lara 3783f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 37843c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_free_thresh = rx_free_thresh; 3785f2c5125aSPablo de Lara 3786f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 37873c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_drop_en = rx_drop_en; 3788f2c5125aSPablo de Lara 3789d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 3790d44f8a48SQi Zhang } 3791d44f8a48SQi Zhang 3792d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 37933c4426dbSDmitry Kozlyuk offloads = port->txq[qid].conf.offloads; 37943c4426dbSDmitry Kozlyuk port->txq[qid].conf = port->dev_info.default_txconf; 3795575e0fd1SWei Zhao if (offloads != 0) 37963c4426dbSDmitry Kozlyuk port->txq[qid].conf.offloads = offloads; 3797d44f8a48SQi Zhang 3798d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 3799f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 38003c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh; 3801f2c5125aSPablo de Lara 3802f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 38033c4426dbSDmitry Kozlyuk
port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh; 3804f2c5125aSPablo de Lara 3805f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 38063c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh; 3807f2c5125aSPablo de Lara 3808f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 38093c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh; 3810f2c5125aSPablo de Lara 3811f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 38123c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_free_thresh = tx_free_thresh; 3813d44f8a48SQi Zhang 3814d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 3815d44f8a48SQi Zhang } 3816f2c5125aSPablo de Lara } 3817f2c5125aSPablo de Lara 38180c4abd36SSteve Yang /* 3819b563c142SFerruh Yigit * Helper function to set MTU from frame size 38200c4abd36SSteve Yang * 38210c4abd36SSteve Yang * port->dev_info should be set before calling this function. 38220c4abd36SSteve Yang * 38230c4abd36SSteve Yang * return 0 on success, negative on error 38240c4abd36SSteve Yang */ 38250c4abd36SSteve Yang int 3826b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen) 38270c4abd36SSteve Yang { 38280c4abd36SSteve Yang struct rte_port *port = &ports[portid]; 38290c4abd36SSteve Yang uint32_t eth_overhead; 38301bb4a528SFerruh Yigit uint16_t mtu, new_mtu; 38310c4abd36SSteve Yang 38321bb4a528SFerruh Yigit eth_overhead = get_eth_overhead(&port->dev_info); 38331bb4a528SFerruh Yigit 38341bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(portid, &mtu) != 0) { 38351bb4a528SFerruh Yigit printf("Failed to get MTU for port %u\n", portid); 38361bb4a528SFerruh Yigit return -1; 38371bb4a528SFerruh Yigit } 38381bb4a528SFerruh Yigit 38391bb4a528SFerruh Yigit new_mtu = max_rx_pktlen - eth_overhead; 38400c4abd36SSteve Yang 38411bb4a528SFerruh Yigit if (mtu == new_mtu) 38421bb4a528SFerruh Yigit return 0; 38431bb4a528SFerruh Yigit 38441bb4a528SFerruh Yigit if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) { 384561a3b0e5SAndrew Rybchenko fprintf(stderr, 384661a3b0e5SAndrew Rybchenko "Failed to set MTU to %u for port %u\n", 38471bb4a528SFerruh Yigit new_mtu, portid); 38481bb4a528SFerruh Yigit return -1; 38490c4abd36SSteve Yang } 38500c4abd36SSteve Yang 38511bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = new_mtu; 38521bb4a528SFerruh Yigit 38530c4abd36SSteve Yang return 0; 38540c4abd36SSteve Yang } 38550c4abd36SSteve Yang 3856013af9b6SIntel void 3857013af9b6SIntel init_port_config(void) 3858013af9b6SIntel { 3859013af9b6SIntel portid_t pid; 3860013af9b6SIntel struct rte_port *port; 3861655eae01SJie Wang int ret, i; 3862013af9b6SIntel 38637d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 3864013af9b6SIntel port = &ports[pid]; 3865013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 38666f51deb9SIvan Ilchenko 38676f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 38686f51deb9SIvan Ilchenko if (ret != 0) 38696f51deb9SIvan Ilchenko return; 38706f51deb9SIvan Ilchenko 38713ce690d3SBruce Richardson if (nb_rxq > 1) { 3872013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 387390892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 3874422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 3875af75078fSIntel } else { 3876013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 3877013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 3878af75078fSIntel } 38793ce690d3SBruce Richardson 38805f592039SJingjing Wu if (port->dcb_flag == 0) { 3881655eae01SJie 
Wang if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) { 3882f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 3883f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3884295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_RSS); 3885655eae01SJie Wang } else { 3886295968d1SFerruh Yigit port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; 3887655eae01SJie Wang port->dev_conf.rxmode.offloads &= 3888295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 3889655eae01SJie Wang 3890655eae01SJie Wang for (i = 0; 3891655eae01SJie Wang i < port->dev_info.nb_rx_queues; 3892655eae01SJie Wang i++) 38933c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads &= 3894295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 3895655eae01SJie Wang } 38963ce690d3SBruce Richardson } 38973ce690d3SBruce Richardson 3898f4d178c1SXueming Li rxtx_port_config(pid); 3899013af9b6SIntel 3900a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 3901a5279d25SIgor Romanov if (ret != 0) 3902a5279d25SIgor Romanov return; 3903013af9b6SIntel 3904a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS 3905e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 39067b7e5ba7SIntel #endif 39078ea656f8SGaetan Rivet 39080a0821bcSPaulis Gributs if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC)) 39098ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 39100a0821bcSPaulis Gributs if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV)) 3911284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 3912013af9b6SIntel } 3913013af9b6SIntel } 3914013af9b6SIntel 391541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 391641b05095SBernard Iremonger { 391741b05095SBernard Iremonger struct rte_port *port; 391841b05095SBernard Iremonger 391941b05095SBernard Iremonger port = &ports[slave_pid]; 392041b05095SBernard Iremonger port->slave_flag = 1; 392141b05095SBernard Iremonger } 392241b05095SBernard Iremonger 392341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 392441b05095SBernard Iremonger { 392541b05095SBernard Iremonger struct rte_port *port; 392641b05095SBernard Iremonger 392741b05095SBernard Iremonger port = &ports[slave_pid]; 392841b05095SBernard Iremonger port->slave_flag = 0; 392941b05095SBernard Iremonger } 393041b05095SBernard Iremonger 39310e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 39320e545d30SBernard Iremonger { 39330e545d30SBernard Iremonger struct rte_port *port; 39340a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 39350a0821bcSPaulis Gributs int ret; 39360e545d30SBernard Iremonger 39370e545d30SBernard Iremonger port = &ports[slave_pid]; 39380a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(slave_pid, &dev_info); 39390a0821bcSPaulis Gributs if (ret != 0) { 39400a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 39410a0821bcSPaulis Gributs "Failed to get device info for port id %d, " 39420a0821bcSPaulis Gributs "cannot determine if the port is a bonded slave\n", 39430a0821bcSPaulis Gributs slave_pid); 39440a0821bcSPaulis Gributs return 0; 39450a0821bcSPaulis Gributs } 39460a0821bcSPaulis Gributs if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 3947b8b8b344SMatan Azrad return 1; 3948b8b8b344SMatan Azrad return 0; 39490e545d30SBernard Iremonger } 39500e545d30SBernard Iremonger 3951013af9b6SIntel const uint16_t vlan_tags[] = { 3952013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 3953013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 3954013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 3955013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 3956013af9b6SIntel }; 3957013af9b6SIntel
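/*
 * Editorial note on the table above (illustrative, not upstream text):
 * in get_eth_dcb_conf() below, DCB+VT mode maps vlan_tags[i] to pool
 * (i % nb_queue_pools). With RTE_ETH_16_POOLS the first 16 tags map
 * one-to-one, e.g. VLAN 5 selects pool mask (1 << 5); with
 * RTE_ETH_32_POOLS all 32 tags are used.
 */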
3958013af9b6SIntel static int 3959ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 39601a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 39611a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 39621a572499SJingjing Wu uint8_t pfc_en) 3963013af9b6SIntel { 3964013af9b6SIntel uint8_t i; 3965ac7c491cSKonstantin Ananyev int32_t rc; 3966ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 3967af75078fSIntel 3968af75078fSIntel /* 3969013af9b6SIntel * Builds up the correct configuration for DCB+VT based on the VLAN tags array 3970013af9b6SIntel * given above, and the number of traffic classes available for use. 3971af75078fSIntel */ 39721a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 39731a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 39741a572499SJingjing Wu &eth_conf->rx_adv_conf.vmdq_dcb_conf; 39751a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 39761a572499SJingjing Wu &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf; 3977013af9b6SIntel 3978547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 39791a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 39801a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 39811a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 3982295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 39831a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 3984295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 3985013af9b6SIntel 39861a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 39871a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 39881a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 39891a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 39901a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 3991af75078fSIntel } 3992295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 3993f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 3994f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 3995013af9b6SIntel } 3996013af9b6SIntel 3997013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 3998f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 3999f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4000295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB); 4001295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 40021a572499SJingjing Wu } else { 40031a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 40041a572499SJingjing Wu &eth_conf->rx_adv_conf.dcb_rx_conf; 40051a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 40061a572499SJingjing Wu &eth_conf->tx_adv_conf.dcb_tx_conf; 4007013af9b6SIntel 40085139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 40095139bc12STing Xu 4010ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 4011ac7c491cSKonstantin Ananyev if (rc != 0) 4012ac7c491cSKonstantin Ananyev return rc; 4013ac7c491cSKonstantin Ananyev 40141a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 40151a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 40161a572499SJingjing Wu 4017295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4018bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 4019bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 4020013af9b6SIntel } 4021ac7c491cSKonstantin Ananyev
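/*
 * Editorial note (illustrative, not upstream text): the RSS
 * configuration read back via rte_eth_dev_rss_hash_conf_get() above is
 * re-applied to rx_adv_conf.rss_conf below, so RTE_ETH_MQ_RX_DCB_RSS
 * keeps the device's current RSS hash settings while adding the DCB
 * traffic-class mapping.
 */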
4022f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4023f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4024295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS); 4025ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 4026295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB; 40271a572499SJingjing Wu } 40281a572499SJingjing Wu 40291a572499SJingjing Wu if (pfc_en) 40301a572499SJingjing Wu eth_conf->dcb_capability_en = 4031295968d1SFerruh Yigit RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT; 4032013af9b6SIntel else 4033295968d1SFerruh Yigit eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT; 4034013af9b6SIntel 4035013af9b6SIntel return 0; 4036013af9b6SIntel } 4037013af9b6SIntel 4038013af9b6SIntel int 40391a572499SJingjing Wu init_port_dcb_config(portid_t pid, 40401a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 40411a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 40421a572499SJingjing Wu uint8_t pfc_en) 4043013af9b6SIntel { 4044013af9b6SIntel struct rte_eth_conf port_conf; 4045013af9b6SIntel struct rte_port *rte_port; 4046013af9b6SIntel int retval; 4047013af9b6SIntel uint16_t i; 4048013af9b6SIntel 4049a550baf2SMin Hu (Connor) if (num_procs > 1) { 4050a550baf2SMin Hu (Connor) printf("The multi-process feature doesn't support DCB.\n"); 4051a550baf2SMin Hu (Connor) return -ENOTSUP; 4052a550baf2SMin Hu (Connor) } 40532a977b89SWenzhuo Lu rte_port = &ports[pid]; 4054013af9b6SIntel 4055c1ba6c32SHuisong Li /* retain the original device configuration. */ 4056c1ba6c32SHuisong Li memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf)); 4057d5354e89SYanglong Wu 4058013af9b6SIntel /* set configuration of DCB in VT mode and DCB in non-VT mode */ 4059ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 4060013af9b6SIntel if (retval < 0) 4061013af9b6SIntel return retval; 4062295968d1SFerruh Yigit port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4063cbe70fdeSJie Wang /* remove RSS HASH offload for DCB in VT mode */ 4064cbe70fdeSJie Wang if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 4065cbe70fdeSJie Wang port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4066cbe70fdeSJie Wang for (i = 0; i < nb_rxq; i++) 40673c4426dbSDmitry Kozlyuk rte_port->rxq[i].conf.offloads &= 4068cbe70fdeSJie Wang ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4069cbe70fdeSJie Wang } 4070013af9b6SIntel 40712f203d44SQi Zhang /* re-configure the device. */ 40722b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf); 40732b0e0ebaSChenbo Xia if (retval < 0) 40742b0e0ebaSChenbo Xia return retval; 40756f51deb9SIvan Ilchenko 40766f51deb9SIvan Ilchenko retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); 40776f51deb9SIvan Ilchenko if (retval != 0) 40786f51deb9SIvan Ilchenko return retval; 40792a977b89SWenzhuo Lu 40802a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 40812a977b89SWenzhuo Lu * the queue ids of the VMDq pools start after the PF queues.
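 *
 * Editorial clarification (an assumption, not upstream text): testpmd's
 * DCB+VT setup expects pool 0 to start at queue 0, so a non-zero
 * vmdq_pool_base cannot be represented here and is rejected below.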
40822a977b89SWenzhuo Lu */ 40832a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 40842a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 408561a3b0e5SAndrew Rybchenko fprintf(stderr, 408661a3b0e5SAndrew Rybchenko "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n", 408761a3b0e5SAndrew Rybchenko pid); 40882a977b89SWenzhuo Lu return -1; 40892a977b89SWenzhuo Lu } 40902a977b89SWenzhuo Lu 40912a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same DCB capability 40922a977b89SWenzhuo Lu * and the same number of rxq and txq in DCB mode 40932a977b89SWenzhuo Lu */ 40942a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 409586ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 409686ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 409786ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 409886ef65eeSBernard Iremonger } else { 40992a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 41002a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 410186ef65eeSBernard Iremonger } 41022a977b89SWenzhuo Lu } else { 41032a977b89SWenzhuo Lu /* if VT is disabled, use all PF queues */ 41042a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 41052a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 41062a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 41072a977b89SWenzhuo Lu } else { 41082a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 41092a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 41102a977b89SWenzhuo Lu 41112a977b89SWenzhuo Lu } 41122a977b89SWenzhuo Lu } 41132a977b89SWenzhuo Lu rx_free_thresh = 64; 41142a977b89SWenzhuo Lu 4115013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 4116013af9b6SIntel 4117f4d178c1SXueming Li rxtx_port_config(pid); 4118013af9b6SIntel /* VLAN filter */ 4119295968d1SFerruh Yigit rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 41201a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 4121013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 4122013af9b6SIntel 4123a5279d25SIgor Romanov retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); 4124a5279d25SIgor Romanov if (retval != 0) 4125a5279d25SIgor Romanov return retval; 4126a5279d25SIgor Romanov 41277741e4cfSIntel rte_port->dcb_flag = 1; 41287741e4cfSIntel 4129a690a070SHuisong Li /* Enter DCB configuration status */ 4130a690a070SHuisong Li dcb_config = 1; 4131a690a070SHuisong Li 4132013af9b6SIntel return 0; 4133af75078fSIntel } 4134af75078fSIntel 4135ffc468ffSTetsuya Mukawa static void 4136ffc468ffSTetsuya Mukawa init_port(void) 4137ffc468ffSTetsuya Mukawa { 41381b9f2746SGregory Etelson int i; 41391b9f2746SGregory Etelson 4140ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports.
*/ 4141ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 4142ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 4143ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 4144ffc468ffSTetsuya Mukawa if (ports == NULL) { 4145ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 4146ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 4147ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 4148ffc468ffSTetsuya Mukawa } 41491b9f2746SGregory Etelson for (i = 0; i < RTE_MAX_ETHPORTS; i++) 415063b72657SIvan Ilchenko ports[i].xstats_info.allocated = false; 415163b72657SIvan Ilchenko for (i = 0; i < RTE_MAX_ETHPORTS; i++) 41521b9f2746SGregory Etelson LIST_INIT(&ports[i].flow_tunnel_list); 415329841336SPhil Yang /* Initialize ports NUMA structures */ 415429841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 415529841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 415629841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 4157ffc468ffSTetsuya Mukawa } 4158ffc468ffSTetsuya Mukawa 4159d3a274ceSZhihong Wang static void 4160d3a274ceSZhihong Wang force_quit(void) 4161d3a274ceSZhihong Wang { 4162d3a274ceSZhihong Wang pmd_test_exit(); 4163d3a274ceSZhihong Wang prompt_exit(); 4164d3a274ceSZhihong Wang } 4165d3a274ceSZhihong Wang 4166d3a274ceSZhihong Wang static void 4167cfea1f30SPablo de Lara print_stats(void) 4168cfea1f30SPablo de Lara { 4169cfea1f30SPablo de Lara uint8_t i; 4170cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 4171cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 4172cfea1f30SPablo de Lara 4173cfea1f30SPablo de Lara /* Clear screen and move to top left */ 4174cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 4175cfea1f30SPablo de Lara 4176cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 4177cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 4178cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 4179683d1e82SIgor Romanov 4180683d1e82SIgor Romanov fflush(stdout); 4181cfea1f30SPablo de Lara } 4182cfea1f30SPablo de Lara 4183cfea1f30SPablo de Lara static void 4184d3a274ceSZhihong Wang signal_handler(int signum) 4185d3a274ceSZhihong Wang { 4186d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 418761a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSignal %d received, preparing to exit...\n", 4188d3a274ceSZhihong Wang signum); 4189a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 4190102b7329SReshma Pattan /* uninitialize packet capture framework */ 4191102b7329SReshma Pattan rte_pdump_uninit(); 4192102b7329SReshma Pattan #endif 4193a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 41948b36297dSAmit Gupta if (latencystats_enabled != 0) 419562d3216dSReshma Pattan rte_latencystats_uninit(); 419662d3216dSReshma Pattan #endif 4197d3a274ceSZhihong Wang force_quit(); 4198d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
*/ 4199d9a191a0SPhil Yang f_quit = 1; 4200d3a274ceSZhihong Wang /* exit with the expected status */ 4201761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4202d3a274ceSZhihong Wang signal(signum, SIG_DFL); 4203d3a274ceSZhihong Wang kill(getpid(), signum); 4204761f7ae1SJie Zhou #endif 4205d3a274ceSZhihong Wang } 4206d3a274ceSZhihong Wang } 4207d3a274ceSZhihong Wang 4208af75078fSIntel int 4209af75078fSIntel main(int argc, char** argv) 4210af75078fSIntel { 4211af75078fSIntel int diag; 4212f8244c63SZhiyong Yang portid_t port_id; 42134918a357SXiaoyun Li uint16_t count; 4214fb73e096SJeff Guo int ret; 4215af75078fSIntel 4216d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 4217d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 4218d3a274ceSZhihong Wang 4219285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 4220285fd101SOlivier Matz if (testpmd_logtype < 0) 422116267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register log type"); 4222285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 4223285fd101SOlivier Matz 42249201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 42259201806eSStephen Hemminger if (diag < 0) 422616267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", 422716267ceeSStephen Hemminger rte_strerror(rte_errno)); 42289201806eSStephen Hemminger 422997b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 423097b5d8b5SThomas Monjalon if (ret != 0) 423116267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); 423297b5d8b5SThomas Monjalon 4233a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 42344aa0d012SAnatoly Burakov /* initialize packet capture framework */ 4235e9436f54STiwei Bie rte_pdump_init(); 42364aa0d012SAnatoly Burakov #endif 42374aa0d012SAnatoly Burakov 42384918a357SXiaoyun Li count = 0; 42394918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 42404918a357SXiaoyun Li ports_ids[count] = port_id; 42414918a357SXiaoyun Li count++; 42424918a357SXiaoyun Li } 42434918a357SXiaoyun Li nb_ports = (portid_t) count; 42444aa0d012SAnatoly Burakov if (nb_ports == 0) 42454aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 42464aa0d012SAnatoly Burakov 42474aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 42484aa0d012SAnatoly Burakov init_port(); 42494aa0d012SAnatoly Burakov 42504aa0d012SAnatoly Burakov set_def_fwd_config(); 42514aa0d012SAnatoly Burakov if (nb_lcores == 0) 425216267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" 425316267ceeSStephen Hemminger "Check the core mask argument\n"); 42544aa0d012SAnatoly Burakov 4255e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 4256a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4257e505d84cSAnatoly Burakov bitrate_enabled = 0; 4258e505d84cSAnatoly Burakov #endif 4259a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 4260e505d84cSAnatoly Burakov latencystats_enabled = 0; 4261e505d84cSAnatoly Burakov #endif 4262e505d84cSAnatoly Burakov 4263fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 42645fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 4265fb7b8b32SAnatoly Burakov do_mlockall = 0; 4266fb7b8b32SAnatoly Burakov #else 4267fb7b8b32SAnatoly Burakov do_mlockall = 1; 4268fb7b8b32SAnatoly Burakov #endif 4269fb7b8b32SAnatoly Burakov 4270e505d84cSAnatoly Burakov argc -= diag; 4271e505d84cSAnatoly Burakov argv += diag; 4272e505d84cSAnatoly Burakov if (argc > 1) 4273e505d84cSAnatoly Burakov 
launch_args_parse(argc, argv); 4274e505d84cSAnatoly Burakov 4275761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4276e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 4277285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 42781c036b16SEelco Chaudron strerror(errno)); 42791c036b16SEelco Chaudron } 4280761f7ae1SJie Zhou #endif 42811c036b16SEelco Chaudron 428299cabef0SPablo de Lara if (tx_first && interactive) 428299cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used in " 428499cabef0SPablo de Lara "interactive mode.\n"); 42858820cba4SDavid Hunt 42868820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 428761a3b0e5SAndrew Rybchenko fprintf(stderr, 428861a3b0e5SAndrew Rybchenko "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n"); 42898820cba4SDavid Hunt lsc_interrupt = 0; 42908820cba4SDavid Hunt } 42918820cba4SDavid Hunt 42925a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 429361a3b0e5SAndrew Rybchenko fprintf(stderr, 429461a3b0e5SAndrew Rybchenko "Warning: Either rx or tx queues should be non-zero\n"); 42955a8fb55cSReshma Pattan 42965a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 429761a3b0e5SAndrew Rybchenko fprintf(stderr, 429861a3b0e5SAndrew Rybchenko "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n", 4299af75078fSIntel nb_rxq, nb_txq); 4300af75078fSIntel 4301af75078fSIntel init_config(); 4302fb73e096SJeff Guo 4303fb73e096SJeff Guo if (hot_plug) { 43042049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 4305fb73e096SJeff Guo if (ret) { 43062049c511SJeff Guo RTE_LOG(ERR, EAL, 43072049c511SJeff Guo "Failed to enable hotplug handling.\n"); 4308fb73e096SJeff Guo return -1; 4309fb73e096SJeff Guo } 4310fb73e096SJeff Guo 43112049c511SJeff Guo ret = rte_dev_event_monitor_start(); 43122049c511SJeff Guo if (ret) { 43132049c511SJeff Guo RTE_LOG(ERR, EAL, 43142049c511SJeff Guo "Failed to start device event monitoring.\n"); 43152049c511SJeff Guo return -1; 43162049c511SJeff Guo } 43172049c511SJeff Guo 43182049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 4319cc1bf307SJeff Guo dev_event_callback, NULL); 43202049c511SJeff Guo if (ret) { 43212049c511SJeff Guo RTE_LOG(ERR, EAL, 43222049c511SJeff Guo "Failed to register device event callback\n"); 43232049c511SJeff Guo return -1; 43242049c511SJeff Guo } 4325fb73e096SJeff Guo } 4326fb73e096SJeff Guo 43276937d210SStephen Hemminger if (!no_device_start && start_port(RTE_PORT_ALL) != 0) 4328148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 4329af75078fSIntel 4330ce8d5614SIntel /* set all ports to promiscuous mode by default */ 433134fc1051SIvan Ilchenko RTE_ETH_FOREACH_DEV(port_id) { 433234fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(port_id); 433334fc1051SIvan Ilchenko if (ret != 0) 433461a3b0e5SAndrew Rybchenko fprintf(stderr, 433561a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 433634fc1051SIvan Ilchenko port_id, rte_strerror(-ret)); 433734fc1051SIvan Ilchenko } 4338af75078fSIntel 4339bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS 43407e4441c8SRemy Horton /* Init metrics library */ 43417e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 4342bb9be9a4SDavid Marchand #endif 43437e4441c8SRemy Horton 4344a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 434562d3216dSReshma Pattan if (latencystats_enabled != 0) { 434662d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 434762d3216dSReshma Pattan if (ret)
434861a3b0e5SAndrew Rybchenko fprintf(stderr, 434961a3b0e5SAndrew Rybchenko "Warning: latencystats init() returned error %d\n", 435061a3b0e5SAndrew Rybchenko ret); 435161a3b0e5SAndrew Rybchenko fprintf(stderr, "Latencystats running on lcore %d\n", 435262d3216dSReshma Pattan latencystats_lcore_id); 435362d3216dSReshma Pattan } 435462d3216dSReshma Pattan #endif 435562d3216dSReshma Pattan 43567e4441c8SRemy Horton /* Setup bitrate stats */ 4357a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4358e25e6c70SRemy Horton if (bitrate_enabled != 0) { 43597e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 43607e4441c8SRemy Horton if (bitrate_data == NULL) 4361e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 4362e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 43637e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 4364e25e6c70SRemy Horton } 43657e4441c8SRemy Horton #endif 4366a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE 4367592ab76fSDavid Marchand if (init_cmdline() != 0) 4368592ab76fSDavid Marchand rte_exit(EXIT_FAILURE, 4369592ab76fSDavid Marchand "Could not initialise cmdline context.\n"); 4370592ab76fSDavid Marchand 437181ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 437281ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 437381ef862bSAllain Legacy 4374ca7feb22SCyril Chemparathy if (interactive == 1) { 4375ca7feb22SCyril Chemparathy if (auto_start) { 4376ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 4377ca7feb22SCyril Chemparathy start_packet_forwarding(0); 4378ca7feb22SCyril Chemparathy } 4379af75078fSIntel prompt(); 43800de738cfSJiayu Hu pmd_test_exit(); 4381ca7feb22SCyril Chemparathy } else 43820d56cb81SThomas Monjalon #endif 43830d56cb81SThomas Monjalon { 4384af75078fSIntel char c; 4385af75078fSIntel int rc; 4386af75078fSIntel 4387d9a191a0SPhil Yang f_quit = 0; 4388d9a191a0SPhil Yang 4389af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 439099cabef0SPablo de Lara start_packet_forwarding(tx_first); 4391cfea1f30SPablo de Lara if (stats_period != 0) { 4392cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 4393cfea1f30SPablo de Lara uint64_t timer_period; 4394cfea1f30SPablo de Lara 4395cfea1f30SPablo de Lara /* Convert to number of cycles */ 4396cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 4397cfea1f30SPablo de Lara 4398d9a191a0SPhil Yang while (f_quit == 0) { 4399cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 4400cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 4401cfea1f30SPablo de Lara 4402cfea1f30SPablo de Lara if (diff_time >= timer_period) { 4403cfea1f30SPablo de Lara print_stats(); 4404cfea1f30SPablo de Lara /* Reset the timer */ 4405cfea1f30SPablo de Lara diff_time = 0; 4406cfea1f30SPablo de Lara } 4407cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 4408cfea1f30SPablo de Lara prev_time = cur_time; 4409761f7ae1SJie Zhou rte_delay_us_sleep(US_PER_S); 4410cfea1f30SPablo de Lara } 4411cfea1f30SPablo de Lara } 4412cfea1f30SPablo de Lara 4413af75078fSIntel printf("Press enter to exit\n"); 4414af75078fSIntel rc = read(0, &c, 1); 4415d3a274ceSZhihong Wang pmd_test_exit(); 4416af75078fSIntel if (rc < 0) 4417af75078fSIntel return 1; 4418af75078fSIntel } 4419af75078fSIntel 44205e516c89SStephen Hemminger ret = rte_eal_cleanup(); 44215e516c89SStephen Hemminger if (ret != 0) 44225e516c89SStephen Hemminger rte_exit(EXIT_FAILURE, 44235e516c89SStephen Hemminger "EAL cleanup failed: %s\n", strerror(-ret)); 
44245e516c89SStephen Hemminger 44255e516c89SStephen Hemminger return EXIT_SUCCESS; 4426af75078fSIntel } 4427
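/*
 * Editorial usage sketch (not part of the upstream file); the exact EAL
 * core and memory-channel options vary by system:
 *
 *	dpdk-testpmd -l 0-3 -n 4 -- -i
 *		start with the interactive command prompt
 *	dpdk-testpmd -l 0-3 -n 4 -- --tx-first --stats-period 1
 *		start forwarding immediately (no prompt) and print the
 *		per-port statistics every second via print_stats() above
 */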