/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif
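/*
 * Illustrative mmap() call (not part of testpmd) showing how the two
 * macros above combine: anonymous 2M hugepages are requested by passing
 * the log2 of the page size (21 for 2M) shifted by HUGE_SHIFT, which on
 * Linux is exactly the MAP_HUGE_2MB encoding.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS | HUGE_FLAG |
 *			(21 << HUGE_SHIFT), -1, 0);
 */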
#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pools used by the
 * ports are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
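/*
 * Illustrative command line (port/socket values made up) showing how the
 * three arrays above are typically filled from testpmd parameters:
 *
 *	--port-numa-config=(0,0),(1,1)      fills port_numa[]
 *	--ring-numa-config=(0,1,0),(1,3,1)  fills rxring_numa[]/txring_numa[]
 *
 * where the middle value of each ring tuple is a flag: 1 = RX ring,
 * 2 = TX ring, 3 = both.
 */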
/*
 * Record the Ethernet addresses of peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};
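/*
 * Sketch (illustrative; "my_mode" and my_pkt_fwd are hypothetical) of
 * the shape of each entry above, assuming the fwd_engine fields declared
 * in testpmd.h. A new mode is plugged in by defining such an object and
 * appending its address before the NULL terminator; it is then selected
 * at runtime with "set fwd my_mode":
 *
 *	struct fwd_engine my_fwd_engine = {
 *		.fwd_mode_name = "my_mode",
 *		.packet_fwd = my_pkt_fwd,
 *	};
 */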
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option
 * cannot be terminated. Set a flag to exit the stats-period loop after
 * receiving SIGINT/SIGTERM.
 */
static volatile uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];

uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
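/*
 * Illustrative runtime command (segment sizes made up): the TXONLY
 * segment layout above is usually changed from the testpmd prompt, e.g.
 *
 *	testpmd> set txpkts 64,128
 *
 * makes every transmitted packet a two-segment chain of 64 + 128 bytes
 * (tx_pkt_nb_segs = 2, tx_pkt_length = 192).
 */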
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RX_DESC_DEFAULT 0
#define TX_DESC_DEFAULT 0
uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
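/*
 * Sketch (illustrative, not the exact testpmd code) of how the
 * RTE_PMD_PARAM_UNSET sentinels above are consumed when a port is set
 * up: a user-supplied value overrides the driver default, otherwise the
 * default reported by rte_eth_dev_info_get() is kept.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
 *		rxconf.rx_thresh.pthresh = rx_pthresh;
 *	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
 *		rxconf.rx_free_thresh = rx_free_thresh;
 */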
/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Disable port flow flush when stopping a port.
 */
uint8_t no_flow_flush = 0; /* do flow flush by default */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */
/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint32_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
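/*
 * Illustrative check (not in testpmd itself): test whether a given
 * ethdev event type is currently displayed according to
 * event_print_mask above.
 *
 *	if (event_print_mask & (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC))
 *		printf("link state change events are printed\n");
 */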
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 to disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
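/*
 * Illustrative invocation (PCI address and queue counts made up): two
 * testpmd processes sharing one device, each polling its own share of
 * the queues as selected via proc_id/num_procs above:
 *
 *	testpmd -a 0000:03:00.0 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=0
 *	testpmd -a 0000:03:00.0 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=1
 */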
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}

static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t slave_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_slaves;
	portid_t slave_pid;
	int i;

	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
					     RTE_MAX_ETHPORTS);
	if (num_slaves < 0) {
		fprintf(stderr, "Failed to get slave list for port = %u\n",
			bond_pid);
		return num_slaves;
	}

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		port = &ports[slave_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}
static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonded port also starts all slaves under the
		 * bonded device. So if this port is a bond device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, false);
	}

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonded port also stops all slaves under the
		 * bonded device. So if this port is a bond device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, true);
	}

	return 0;
}
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check whether a socket has already been discovered.
 * Return 1 if the socket id is new (not yet in socket_ids[]), 0 otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
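/*
 * Worked example: with RTE_ETHER_LOCAL_ADMIN_ADDR (0x02) written to
 * byte 0 and the port id to byte 5 (the remaining bytes stay
 * zero-initialized), port 3's default peer address comes out as
 * 02:00:00:00:00:03.
 */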
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
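/*
 * Worked example (hypothetical numbers): nb_mbufs = 8192, pgsz = 2M,
 * and suppose rte_mempool_calc_obj_size() yields obj_sz = 2560. Then
 * mbuf_per_pg = 2097152 / 2560 = 819, n_pages = 8192 / 819 + 1 = 11,
 * mbuf_mem = 11 * 2M = 22M, and total_mem = 128M + 22M = 150M.
 */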
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
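/*
 * Worked example: for 2M pages, rte_log2_u64(2097152) = 21, so
 * pagesz_flags() returns 21 << 26, which matches the kernel's
 * MAP_HUGE_2MB encoding; for 1G pages it yields 30 << 26 (MAP_HUGE_1GB).
 */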
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
		    param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
			    "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			       RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
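/*
 * Worked example (hypothetical sizes, assuming 64-byte cache lines so
 * EXTBUF_ZONE_SIZE = 2097152 - 256 = 2096896): mbuf_sz = 2176 is
 * already cache-line aligned, elt_num = 2096896 / 2176 = 963 buffers
 * per zone, so nb_mbufs = 8192 needs zone_num = 9 memzones.
 */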
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1216c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1217c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1218c7f5dba7SAnatoly Burakov 12190e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 12200e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1221ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1222c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1223c7f5dba7SAnatoly Burakov heap_socket); 1224c7f5dba7SAnatoly Burakov break; 1225c7f5dba7SAnatoly Burakov } 1226761f7ae1SJie Zhou #endif 122772512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 122872512e18SViacheslav Ovsiienko { 122972512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 123072512e18SViacheslav Ovsiienko unsigned int ext_num; 123172512e18SViacheslav Ovsiienko 123272512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 123372512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 123472512e18SViacheslav Ovsiienko if (ext_num == 0) 123572512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 123672512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 123772512e18SViacheslav Ovsiienko 123872512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 123972512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 124072512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 124172512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 124272512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 124372512e18SViacheslav Ovsiienko ext_mem, ext_num); 124472512e18SViacheslav Ovsiienko free(ext_mem); 124572512e18SViacheslav Ovsiienko break; 124672512e18SViacheslav Ovsiienko } 1247c7f5dba7SAnatoly Burakov default: 1248c7f5dba7SAnatoly Burakov { 1249c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1250c7f5dba7SAnatoly Burakov } 1251bece7b6cSChristian Ehrhardt } 1252148f963fSBruce Richardson 1253761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 125424427bb9SOlivier Matz err: 1255761f7ae1SJie Zhou #endif 1256af75078fSIntel if (rte_mp == NULL) { 1257d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1258d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1259d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1260148f963fSBruce Richardson } else if (verbose_level > 0) { 1261591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1262af75078fSIntel } 1263401b744dSShahaf Shuler return rte_mp; 1264af75078fSIntel } 1265af75078fSIntel 126620a0286fSLiu Xiaofeng /* 126720a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 126820a0286fSLiu Xiaofeng * if valid, return 0, else return -1 126920a0286fSLiu Xiaofeng */ 127020a0286fSLiu Xiaofeng static int 127120a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 127220a0286fSLiu Xiaofeng { 127320a0286fSLiu Xiaofeng static int warning_once = 0; 127420a0286fSLiu Xiaofeng 1275c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 127620a0286fSLiu Xiaofeng if (!warning_once && numa_support) 127761a3b0e5SAndrew Rybchenko fprintf(stderr, 127861a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 127920a0286fSLiu Xiaofeng warning_once = 1; 128020a0286fSLiu Xiaofeng return -1; 128120a0286fSLiu Xiaofeng } 128220a0286fSLiu Xiaofeng return 0; 128320a0286fSLiu 
Xiaofeng } 128420a0286fSLiu Xiaofeng 12853f7311baSWei Dai /* 12863f7311baSWei Dai * Get the allowed maximum number of RX queues. 12873f7311baSWei Dai * *pid return the port id which has minimal value of 12883f7311baSWei Dai * max_rx_queues in all ports. 12893f7311baSWei Dai */ 12903f7311baSWei Dai queueid_t 12913f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 12923f7311baSWei Dai { 12939e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; 12946f51deb9SIvan Ilchenko bool max_rxq_valid = false; 12953f7311baSWei Dai portid_t pi; 12963f7311baSWei Dai struct rte_eth_dev_info dev_info; 12973f7311baSWei Dai 12983f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 12996f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13006f51deb9SIvan Ilchenko continue; 13016f51deb9SIvan Ilchenko 13026f51deb9SIvan Ilchenko max_rxq_valid = true; 13033f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 13043f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 13053f7311baSWei Dai *pid = pi; 13063f7311baSWei Dai } 13073f7311baSWei Dai } 13086f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0; 13093f7311baSWei Dai } 13103f7311baSWei Dai 13113f7311baSWei Dai /* 13123f7311baSWei Dai * Check input rxq is valid or not. 13133f7311baSWei Dai * If input rxq is not greater than any of maximum number 13143f7311baSWei Dai * of RX queues of all ports, it is valid. 13153f7311baSWei Dai * if valid, return 0, else return -1 13163f7311baSWei Dai */ 13173f7311baSWei Dai int 13183f7311baSWei Dai check_nb_rxq(queueid_t rxq) 13193f7311baSWei Dai { 13203f7311baSWei Dai queueid_t allowed_max_rxq; 13213f7311baSWei Dai portid_t pid = 0; 13223f7311baSWei Dai 13233f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 13243f7311baSWei Dai if (rxq > allowed_max_rxq) { 132561a3b0e5SAndrew Rybchenko fprintf(stderr, 132661a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n", 132761a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid); 13283f7311baSWei Dai return -1; 13293f7311baSWei Dai } 13303f7311baSWei Dai return 0; 13313f7311baSWei Dai } 13323f7311baSWei Dai 133336db4f6cSWei Dai /* 133436db4f6cSWei Dai * Get the allowed maximum number of TX queues. 133536db4f6cSWei Dai * *pid return the port id which has minimal value of 133636db4f6cSWei Dai * max_tx_queues in all ports. 133736db4f6cSWei Dai */ 133836db4f6cSWei Dai queueid_t 133936db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 134036db4f6cSWei Dai { 13419e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; 13426f51deb9SIvan Ilchenko bool max_txq_valid = false; 134336db4f6cSWei Dai portid_t pi; 134436db4f6cSWei Dai struct rte_eth_dev_info dev_info; 134536db4f6cSWei Dai 134636db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13476f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13486f51deb9SIvan Ilchenko continue; 13496f51deb9SIvan Ilchenko 13506f51deb9SIvan Ilchenko max_txq_valid = true; 135136db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 135236db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 135336db4f6cSWei Dai *pid = pi; 135436db4f6cSWei Dai } 135536db4f6cSWei Dai } 13566f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0; 135736db4f6cSWei Dai } 135836db4f6cSWei Dai 135936db4f6cSWei Dai /* 136036db4f6cSWei Dai * Check input txq is valid or not. 136136db4f6cSWei Dai * If input txq is not greater than any of maximum number 136236db4f6cSWei Dai * of TX queues of all ports, it is valid. 
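 * For example (illustrative figures): with two ports reporting
 * max_tx_queues of 16 and 4, get_allowed_max_nb_txq() returns 4, so:
 *
 *   check_nb_txq(8);   // fails: 8 > 4
 *   check_nb_txq(4);   // succeeds
 *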
136336db4f6cSWei Dai * if valid, return 0, else return -1 136436db4f6cSWei Dai */ 136536db4f6cSWei Dai int 136636db4f6cSWei Dai check_nb_txq(queueid_t txq) 136736db4f6cSWei Dai { 136836db4f6cSWei Dai queueid_t allowed_max_txq; 136936db4f6cSWei Dai portid_t pid = 0; 137036db4f6cSWei Dai 137136db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 137236db4f6cSWei Dai if (txq > allowed_max_txq) { 137361a3b0e5SAndrew Rybchenko fprintf(stderr, 137461a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n", 137561a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid); 137636db4f6cSWei Dai return -1; 137736db4f6cSWei Dai } 137836db4f6cSWei Dai return 0; 137936db4f6cSWei Dai } 138036db4f6cSWei Dai 13811c69df45SOri Kam /* 138299e040d3SLijun Ou * Get the allowed maximum number of RXDs of every rx queue. 138399e040d3SLijun Ou * *pid return the port id which has minimal value of 138499e040d3SLijun Ou * max_rxd in all queues of all ports. 138599e040d3SLijun Ou */ 138699e040d3SLijun Ou static uint16_t 138799e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid) 138899e040d3SLijun Ou { 138999e040d3SLijun Ou uint16_t allowed_max_rxd = UINT16_MAX; 139099e040d3SLijun Ou portid_t pi; 139199e040d3SLijun Ou struct rte_eth_dev_info dev_info; 139299e040d3SLijun Ou 139399e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 139499e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 139599e040d3SLijun Ou continue; 139699e040d3SLijun Ou 139799e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) { 139899e040d3SLijun Ou allowed_max_rxd = dev_info.rx_desc_lim.nb_max; 139999e040d3SLijun Ou *pid = pi; 140099e040d3SLijun Ou } 140199e040d3SLijun Ou } 140299e040d3SLijun Ou return allowed_max_rxd; 140399e040d3SLijun Ou } 140499e040d3SLijun Ou 140599e040d3SLijun Ou /* 140699e040d3SLijun Ou * Get the allowed minimal number of RXDs of every rx queue. 140799e040d3SLijun Ou * *pid return the port id which has minimal value of 140899e040d3SLijun Ou * min_rxd in all queues of all ports. 140999e040d3SLijun Ou */ 141099e040d3SLijun Ou static uint16_t 141199e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid) 141299e040d3SLijun Ou { 141399e040d3SLijun Ou uint16_t allowed_min_rxd = 0; 141499e040d3SLijun Ou portid_t pi; 141599e040d3SLijun Ou struct rte_eth_dev_info dev_info; 141699e040d3SLijun Ou 141799e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 141899e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 141999e040d3SLijun Ou continue; 142099e040d3SLijun Ou 142199e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) { 142299e040d3SLijun Ou allowed_min_rxd = dev_info.rx_desc_lim.nb_min; 142399e040d3SLijun Ou *pid = pi; 142499e040d3SLijun Ou } 142599e040d3SLijun Ou } 142699e040d3SLijun Ou 142799e040d3SLijun Ou return allowed_min_rxd; 142899e040d3SLijun Ou } 142999e040d3SLijun Ou 143099e040d3SLijun Ou /* 143199e040d3SLijun Ou * Check input rxd is valid or not. 143299e040d3SLijun Ou * If input rxd is not greater than any of maximum number 143399e040d3SLijun Ou * of RXDs of every Rx queues and is not less than any of 143499e040d3SLijun Ou * minimal number of RXDs of every Rx queues, it is valid. 
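 * For example (illustrative figures): if the tightest limits across
 * all ports are rx_desc_lim.nb_min = 64 and rx_desc_lim.nb_max = 4096:
 *
 *   check_nb_rxd(32);    // fails: below the common minimum
 *   check_nb_rxd(1024);  // succeeds
 *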
143599e040d3SLijun Ou * if valid, return 0, else return -1
143699e040d3SLijun Ou */
143799e040d3SLijun Ou int
143899e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
143999e040d3SLijun Ou {
144099e040d3SLijun Ou uint16_t allowed_max_rxd;
144199e040d3SLijun Ou uint16_t allowed_min_rxd;
144299e040d3SLijun Ou portid_t pid = 0;
144399e040d3SLijun Ou 
144499e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
144599e040d3SLijun Ou if (rxd > allowed_max_rxd) {
144661a3b0e5SAndrew Rybchenko fprintf(stderr,
144761a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
144861a3b0e5SAndrew Rybchenko rxd, allowed_max_rxd, pid);
144999e040d3SLijun Ou return -1;
145099e040d3SLijun Ou }
145199e040d3SLijun Ou 
145299e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
145399e040d3SLijun Ou if (rxd < allowed_min_rxd) {
145461a3b0e5SAndrew Rybchenko fprintf(stderr,
145561a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
145661a3b0e5SAndrew Rybchenko rxd, allowed_min_rxd, pid);
145799e040d3SLijun Ou return -1;
145899e040d3SLijun Ou }
145999e040d3SLijun Ou 
146099e040d3SLijun Ou return 0;
146199e040d3SLijun Ou }
146299e040d3SLijun Ou 
146399e040d3SLijun Ou /*
146499e040d3SLijun Ou * Get the allowed maximum number of TXDs of every tx queue.
146599e040d3SLijun Ou * *pid return the port id which has minimal value of
146699e040d3SLijun Ou * max_txd in every tx queue.
146799e040d3SLijun Ou */
146899e040d3SLijun Ou static uint16_t
146999e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
147099e040d3SLijun Ou {
147199e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX;
147299e040d3SLijun Ou portid_t pi;
147399e040d3SLijun Ou struct rte_eth_dev_info dev_info;
147499e040d3SLijun Ou 
147599e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
147699e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
147799e040d3SLijun Ou continue;
147899e040d3SLijun Ou 
147999e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
148099e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max;
148199e040d3SLijun Ou *pid = pi;
148299e040d3SLijun Ou }
148399e040d3SLijun Ou }
148499e040d3SLijun Ou return allowed_max_txd;
148599e040d3SLijun Ou }
148699e040d3SLijun Ou 
148799e040d3SLijun Ou /*
148899e040d3SLijun Ou * Get the allowed minimal number of TXDs of every tx queue.
148999e040d3SLijun Ou * *pid return the port id which has minimal value of
149099e040d3SLijun Ou * min_txd in every tx queue.
149199e040d3SLijun Ou */
149299e040d3SLijun Ou static uint16_t
149399e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
149499e040d3SLijun Ou {
149599e040d3SLijun Ou uint16_t allowed_min_txd = 0;
149699e040d3SLijun Ou portid_t pi;
149799e040d3SLijun Ou struct rte_eth_dev_info dev_info;
149899e040d3SLijun Ou 
149999e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
150099e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
150199e040d3SLijun Ou continue;
150299e040d3SLijun Ou 
150399e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
150499e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min;
150599e040d3SLijun Ou *pid = pi;
150699e040d3SLijun Ou }
150799e040d3SLijun Ou }
150899e040d3SLijun Ou 
150999e040d3SLijun Ou return allowed_min_txd;
151099e040d3SLijun Ou }
151199e040d3SLijun Ou 
151299e040d3SLijun Ou /*
151399e040d3SLijun Ou * Check input txd is valid or not.
151499e040d3SLijun Ou * If input txd is not greater than any of maximum number of TXDs
151599e040d3SLijun Ou * and not less than any of minimal number of TXDs of every Tx queue, it is valid.
151699e040d3SLijun Ou * if valid, return 0, else return -1
151799e040d3SLijun Ou */
151899e040d3SLijun Ou int
151999e040d3SLijun Ou check_nb_txd(queueid_t txd)
152099e040d3SLijun Ou {
152199e040d3SLijun Ou uint16_t allowed_max_txd;
152299e040d3SLijun Ou uint16_t allowed_min_txd;
152399e040d3SLijun Ou portid_t pid = 0;
152499e040d3SLijun Ou 
152599e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid);
152699e040d3SLijun Ou if (txd > allowed_max_txd) {
152761a3b0e5SAndrew Rybchenko fprintf(stderr,
152861a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
152961a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid);
153099e040d3SLijun Ou return -1;
153199e040d3SLijun Ou }
153299e040d3SLijun Ou 
153399e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid);
153499e040d3SLijun Ou if (txd < allowed_min_txd) {
153561a3b0e5SAndrew Rybchenko fprintf(stderr,
153661a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
153761a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid);
153899e040d3SLijun Ou return -1;
153999e040d3SLijun Ou }
154099e040d3SLijun Ou return 0;
154199e040d3SLijun Ou }
154299e040d3SLijun Ou 
154399e040d3SLijun Ou 
154499e040d3SLijun Ou /*
15451c69df45SOri Kam * Get the allowed maximum number of hairpin queues.
15461c69df45SOri Kam * *pid return the port id which has minimal value of
15471c69df45SOri Kam * max_hairpin_queues in all ports.
15481c69df45SOri Kam */
15491c69df45SOri Kam queueid_t
15501c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15511c69df45SOri Kam {
15529e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15531c69df45SOri Kam portid_t pi;
15541c69df45SOri Kam struct rte_eth_hairpin_cap cap;
15551c69df45SOri Kam 
15561c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) {
15571c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15581c69df45SOri Kam *pid = pi;
15591c69df45SOri Kam return 0;
15601c69df45SOri Kam }
15611c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) {
15621c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues;
15631c69df45SOri Kam *pid = pi;
15641c69df45SOri Kam }
15651c69df45SOri Kam }
15661c69df45SOri Kam return allowed_max_hairpinq;
15671c69df45SOri Kam }
15681c69df45SOri Kam 
15691c69df45SOri Kam /*
15701c69df45SOri Kam * Check input hairpin is valid or not.
15711c69df45SOri Kam * If input hairpin is not greater than any of maximum number
15721c69df45SOri Kam * of hairpin queues of all ports, it is valid.
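 * Note that get_allowed_max_nb_hairpinq() returns 0 as soon as one
 * port does not implement rte_eth_dev_hairpin_capability_get(), so
 * any non-zero request is rejected in that case. Illustrative caller
 * handling (sketch):
 *
 *   if (check_nb_hairpinq(nb_hairpinq) != 0)
 *       return -1;   // invalid hairpin queue count
 *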
15731c69df45SOri Kam * if valid, return 0, else return -1 15741c69df45SOri Kam */ 15751c69df45SOri Kam int 15761c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15771c69df45SOri Kam { 15781c69df45SOri Kam queueid_t allowed_max_hairpinq; 15791c69df45SOri Kam portid_t pid = 0; 15801c69df45SOri Kam 15811c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15821c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 158361a3b0e5SAndrew Rybchenko fprintf(stderr, 158461a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 15851c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 15861c69df45SOri Kam return -1; 15871c69df45SOri Kam } 15881c69df45SOri Kam return 0; 15891c69df45SOri Kam } 15901c69df45SOri Kam 15911bb4a528SFerruh Yigit static int 15921bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 15931bb4a528SFerruh Yigit { 15941bb4a528SFerruh Yigit uint32_t eth_overhead; 15951bb4a528SFerruh Yigit 15961bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 15971bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 15981bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 15991bb4a528SFerruh Yigit else 16001bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 16011bb4a528SFerruh Yigit 16021bb4a528SFerruh Yigit return eth_overhead; 16031bb4a528SFerruh Yigit } 16041bb4a528SFerruh Yigit 1605af75078fSIntel static void 1606b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1607b6b8a1ebSViacheslav Ovsiienko { 1608b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1609b6b8a1ebSViacheslav Ovsiienko int ret; 1610b6b8a1ebSViacheslav Ovsiienko int i; 1611b6b8a1ebSViacheslav Ovsiienko 1612f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 1613f6d8a6d3SIvan Malov 1614b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1615b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1616b6b8a1ebSViacheslav Ovsiienko 1617b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1618b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1619b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1620b6b8a1ebSViacheslav Ovsiienko 1621295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1622b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1623295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1624b6b8a1ebSViacheslav Ovsiienko 1625b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1626b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 16273c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads; 1628b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1629b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 16303c4426dbSDmitry Kozlyuk port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 1631b6b8a1ebSViacheslav Ovsiienko 1632b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1633b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1634b6b8a1ebSViacheslav Ovsiienko 16351bb4a528SFerruh Yigit if (max_rx_pkt_len) 16361bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16371bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 16381bb4a528SFerruh Yigit 1639b6b8a1ebSViacheslav Ovsiienko /* set flag to 
initialize port/queue */ 1640b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1641b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1642b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1643b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1644b6b8a1ebSViacheslav Ovsiienko 1645b6b8a1ebSViacheslav Ovsiienko /* 1646b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1647b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1648b6b8a1ebSViacheslav Ovsiienko */ 1649b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1650b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16511bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16521bb4a528SFerruh Yigit uint16_t mtu; 1653b6b8a1ebSViacheslav Ovsiienko 16541bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16551bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16561bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16571bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16581bb4a528SFerruh Yigit 16591bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16601bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1661b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1662b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1663b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1664b6b8a1ebSViacheslav Ovsiienko } 1665b6b8a1ebSViacheslav Ovsiienko } 1666b6b8a1ebSViacheslav Ovsiienko } 16671bb4a528SFerruh Yigit } 1668b6b8a1ebSViacheslav Ovsiienko 1669b6b8a1ebSViacheslav Ovsiienko static void 1670af75078fSIntel init_config(void) 1671af75078fSIntel { 1672ce8d5614SIntel portid_t pid; 1673af75078fSIntel struct rte_mempool *mbp; 1674af75078fSIntel unsigned int nb_mbuf_per_pool; 1675af75078fSIntel lcoreid_t lc_id; 16766970401eSDavid Marchand #ifdef RTE_LIB_GRO 1677b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16786970401eSDavid Marchand #endif 16796970401eSDavid Marchand #ifdef RTE_LIB_GSO 168052f38a20SJiayu Hu uint32_t gso_types; 16816970401eSDavid Marchand #endif 1682487f9a59SYulong Pei 1683af75078fSIntel /* Configuration of logical cores. 
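 *
 * One fwd_lcore context is allocated per forwarding lcore; each
 * context later receives its mbuf pool, GSO context and stream
 * range. The loop below leaves each context with the invariant
 * (sketch):
 *
 *   fwd_lcores[lc_id]->cpuid_idx == lc_id   // array index, not CPU id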
*/ 1684af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1685af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1686fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1687af75078fSIntel if (fwd_lcores == NULL) { 1688ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1689ce8d5614SIntel "failed\n", nb_lcores); 1690af75078fSIntel } 1691af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1692af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1693af75078fSIntel sizeof(struct fwd_lcore), 1694fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1695af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1696ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1697ce8d5614SIntel "failed\n"); 1698af75078fSIntel } 1699af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1700af75078fSIntel } 1701af75078fSIntel 17027d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1703b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 17046f51deb9SIvan Ilchenko 1705b6ea6408SIntel if (numa_support) { 1706b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1707b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1708b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 170920a0286fSLiu Xiaofeng 171029841336SPhil Yang /* 171129841336SPhil Yang * if socket_id is invalid, 171229841336SPhil Yang * set to the first available socket. 171329841336SPhil Yang */ 171420a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 171529841336SPhil Yang socket_id = socket_ids[0]; 1716b6ea6408SIntel } 1717b6b8a1ebSViacheslav Ovsiienko } else { 1718b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1719b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1720b6ea6408SIntel } 1721b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1722b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1723ce8d5614SIntel } 17243ab64341SOlivier Matz /* 17253ab64341SOlivier Matz * Create pools of mbuf. 17263ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 17273ab64341SOlivier Matz * socket 0 memory by default. 17283ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 17293ab64341SOlivier Matz * 17303ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 17313ab64341SOlivier Matz * nb_txd can be configured at run time. 
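 *
 * Worked example of the default sizing below (illustrative,
 * assuming 4 lcores and mb_mempool_cache = 250):
 *
 *   nb_mbuf_per_pool = RX_DESC_MAX + (4 * 250) + TX_DESC_MAX
 *                      + MAX_PKT_BURST;
 *   nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;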
17323ab64341SOlivier Matz */ 17333ab64341SOlivier Matz if (param_total_num_mbufs) 17343ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17353ab64341SOlivier Matz else { 17364ed89049SDavid Marchand nb_mbuf_per_pool = RX_DESC_MAX + 17373ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17384ed89049SDavid Marchand TX_DESC_MAX + MAX_PKT_BURST; 17393ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17403ab64341SOlivier Matz } 17413ab64341SOlivier Matz 1742b6ea6408SIntel if (numa_support) { 174326cbb419SViacheslav Ovsiienko uint8_t i, j; 1744ce8d5614SIntel 1745c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 174626cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 174726cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 174826cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1749401b744dSShahaf Shuler nb_mbuf_per_pool, 175026cbb419SViacheslav Ovsiienko socket_ids[i], j); 17513ab64341SOlivier Matz } else { 175226cbb419SViacheslav Ovsiienko uint8_t i; 175326cbb419SViacheslav Ovsiienko 175426cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 175526cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 175626cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1757401b744dSShahaf Shuler nb_mbuf_per_pool, 175826cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 175926cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17603ab64341SOlivier Matz } 1761b6ea6408SIntel 1762b6ea6408SIntel init_port_config(); 17635886ae07SAdrien Mazarguil 17646970401eSDavid Marchand #ifdef RTE_LIB_GSO 1765295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1766295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17676970401eSDavid Marchand #endif 17685886ae07SAdrien Mazarguil /* 17695886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
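 * The per-lcore lookup prefers the pool on the lcore's own socket
 * and falls back to the socket-0 pool, mirroring the loop below
 * (sketch):
 *
 *   mbp = mbuf_pool_find(rte_lcore_to_socket_id(cpuid), 0);
 *   if (mbp == NULL)
 *       mbp = mbuf_pool_find(0, 0);   // fallback pool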
17705886ae07SAdrien Mazarguil */ 17715886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17728fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 177326cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17748fd8bebcSAdrien Mazarguil 17755886ae07SAdrien Mazarguil if (mbp == NULL) 177626cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17775886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17786970401eSDavid Marchand #ifdef RTE_LIB_GSO 177952f38a20SJiayu Hu /* initialize GSO context */ 178052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 178152f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 178252f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 178335b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 178435b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 178552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 17866970401eSDavid Marchand #endif 17875886ae07SAdrien Mazarguil } 17885886ae07SAdrien Mazarguil 17890c0db76fSBernard Iremonger fwd_config_setup(); 1790b7091f1dSJiayu Hu 17916970401eSDavid Marchand #ifdef RTE_LIB_GRO 1792b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1793b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1794b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1795b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1796b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1797b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1798b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1799b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1800b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1801b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1802b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1803b7091f1dSJiayu Hu } 1804b7091f1dSJiayu Hu } 18056970401eSDavid Marchand #endif 1806ce8d5614SIntel } 1807ce8d5614SIntel 18082950a769SDeclan Doherty 18092950a769SDeclan Doherty void 1810a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 18112950a769SDeclan Doherty { 18122950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
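 *
 * Typically invoked when a port is attached at run time, e.g.
 * (illustrative call only):
 *
 *   reconfig(new_port_id, rte_eth_dev_socket_id(new_port_id));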
*/ 1813b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 18142950a769SDeclan Doherty init_port_config(); 18152950a769SDeclan Doherty } 18162950a769SDeclan Doherty 1817ce8d5614SIntel int 1818ce8d5614SIntel init_fwd_streams(void) 1819ce8d5614SIntel { 1820ce8d5614SIntel portid_t pid; 1821ce8d5614SIntel struct rte_port *port; 1822ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 18235a8fb55cSReshma Pattan queueid_t q; 1824ce8d5614SIntel 1825ce8d5614SIntel /* set socket id according to numa or not */ 18267d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1827ce8d5614SIntel port = &ports[pid]; 1828ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 182961a3b0e5SAndrew Rybchenko fprintf(stderr, 183061a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 183161a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1832ce8d5614SIntel return -1; 1833ce8d5614SIntel } 1834ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 183561a3b0e5SAndrew Rybchenko fprintf(stderr, 183661a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 183761a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1838ce8d5614SIntel return -1; 1839ce8d5614SIntel } 184020a0286fSLiu Xiaofeng if (numa_support) { 184120a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 184220a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 184320a0286fSLiu Xiaofeng else { 1844b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 184520a0286fSLiu Xiaofeng 184629841336SPhil Yang /* 184729841336SPhil Yang * if socket_id is invalid, 184829841336SPhil Yang * set to the first available socket. 184929841336SPhil Yang */ 185020a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 185129841336SPhil Yang port->socket_id = socket_ids[0]; 185220a0286fSLiu Xiaofeng } 185320a0286fSLiu Xiaofeng } 1854b6ea6408SIntel else { 1855b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1856af75078fSIntel port->socket_id = 0; 1857b6ea6408SIntel else 1858b6ea6408SIntel port->socket_id = socket_num; 1859b6ea6408SIntel } 1860af75078fSIntel } 1861af75078fSIntel 18625a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18635a8fb55cSReshma Pattan if (q == 0) { 186461a3b0e5SAndrew Rybchenko fprintf(stderr, 186561a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18665a8fb55cSReshma Pattan return -1; 18675a8fb55cSReshma Pattan } 18685a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1869ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1870ce8d5614SIntel return 0; 1871ce8d5614SIntel /* clear the old */ 1872ce8d5614SIntel if (fwd_streams != NULL) { 1873ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1874ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1875ce8d5614SIntel continue; 1876ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1877ce8d5614SIntel fwd_streams[sm_id] = NULL; 1878af75078fSIntel } 1879ce8d5614SIntel rte_free(fwd_streams); 1880ce8d5614SIntel fwd_streams = NULL; 1881ce8d5614SIntel } 1882ce8d5614SIntel 1883ce8d5614SIntel /* init new */ 1884ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 18851f84c469SMatan Azrad if (nb_fwd_streams) { 1886ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 18871f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 18881f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1889ce8d5614SIntel if (fwd_streams == NULL) 18901f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 18911f84c469SMatan Azrad " 
(struct fwd_stream *)) failed\n", 18921f84c469SMatan Azrad nb_fwd_streams); 1893ce8d5614SIntel 1894af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 18951f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 18961f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 18971f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1898ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 18991f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 19001f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 19011f84c469SMatan Azrad } 1902af75078fSIntel } 1903ce8d5614SIntel 1904ce8d5614SIntel return 0; 1905af75078fSIntel } 1906af75078fSIntel 1907af75078fSIntel static void 1908af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1909af75078fSIntel { 19107569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 191185de481aSHonnappa Nagarahalli uint64_t nb_burst; 19127569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 19137569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1914af75078fSIntel uint16_t nb_pkt; 19157569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 19167569b8c1SHonnappa Nagarahalli int i; 1917af75078fSIntel 1918af75078fSIntel /* 1919af75078fSIntel * First compute the total number of packet bursts and the 1920af75078fSIntel * two highest numbers of bursts of the same number of packets. 1921af75078fSIntel */ 19227569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 19237569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 19247569b8c1SHonnappa Nagarahalli 19257569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 19267569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 19277569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 19287569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 19297569b8c1SHonnappa Nagarahalli 19307569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. 
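 * The 0-packet bucket is always reported first; the output then
 * shows up to two more burst sizes plus an "other" remainder, e.g.
 * (made-up figures):
 *
 *   RX-bursts : 100 [10% of 0 pkts + 70% of 32 pkts + 20% of other]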
*/ 19316a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1932af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 19337569b8c1SHonnappa Nagarahalli 1934af75078fSIntel if (nb_burst == 0) 1935af75078fSIntel continue; 19367569b8c1SHonnappa Nagarahalli 1937af75078fSIntel total_burst += nb_burst; 19387569b8c1SHonnappa Nagarahalli 19397569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19407569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19417569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1942fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1943fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19447569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19457569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19467569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1947af75078fSIntel } 1948af75078fSIntel } 1949af75078fSIntel if (total_burst == 0) 1950af75078fSIntel return; 19517569b8c1SHonnappa Nagarahalli 19527569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19537569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19547569b8c1SHonnappa Nagarahalli if (i == 3) { 19557569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1956af75078fSIntel return; 1957af75078fSIntel } 19587569b8c1SHonnappa Nagarahalli 19597569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19607569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19617569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19627569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1963af75078fSIntel return; 1964af75078fSIntel } 19657569b8c1SHonnappa Nagarahalli 19667569b8c1SHonnappa Nagarahalli burst_percent[i] = 19677569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19687569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19697569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19707569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1971af75078fSIntel } 1972af75078fSIntel } 1973af75078fSIntel 1974af75078fSIntel static void 1975af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1976af75078fSIntel { 1977af75078fSIntel struct fwd_stream *fs; 1978af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1979af75078fSIntel 1980af75078fSIntel fs = fwd_streams[stream_id]; 1981af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1982af75078fSIntel (fs->fwd_dropped == 0)) 1983af75078fSIntel return; 1984af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1985af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1986af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1987af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1988c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1989c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1990af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1991af75078fSIntel 1992af75078fSIntel /* if checksum mode */ 1993af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1994c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1995c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1996c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 199758d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 199858d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 1999d139cf23SLance Richardson printf(" RX- bad outer IP checksum: 
%-14"PRIu64"\n", 2000d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 200194d65546SDavid Marchand } else { 200294d65546SDavid Marchand printf("\n"); 2003af75078fSIntel } 2004af75078fSIntel 20050e4b1963SDharmik Thakkar if (record_burst_stats) { 2006af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 2007af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 20080e4b1963SDharmik Thakkar } 2009af75078fSIntel } 2010af75078fSIntel 201153324971SDavid Marchand void 201253324971SDavid Marchand fwd_stats_display(void) 201353324971SDavid Marchand { 201453324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 201553324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 201653324971SDavid Marchand struct { 201753324971SDavid Marchand struct fwd_stream *rx_stream; 201853324971SDavid Marchand struct fwd_stream *tx_stream; 201953324971SDavid Marchand uint64_t tx_dropped; 202053324971SDavid Marchand uint64_t rx_bad_ip_csum; 202153324971SDavid Marchand uint64_t rx_bad_l4_csum; 202253324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 2023d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 202453324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 202553324971SDavid Marchand uint64_t total_rx_dropped = 0; 202653324971SDavid Marchand uint64_t total_tx_dropped = 0; 202753324971SDavid Marchand uint64_t total_rx_nombuf = 0; 202853324971SDavid Marchand struct rte_eth_stats stats; 202953324971SDavid Marchand uint64_t fwd_cycles = 0; 203053324971SDavid Marchand uint64_t total_recv = 0; 203153324971SDavid Marchand uint64_t total_xmit = 0; 203253324971SDavid Marchand struct rte_port *port; 203353324971SDavid Marchand streamid_t sm_id; 203453324971SDavid Marchand portid_t pt_id; 2035baef6bbfSMin Hu (Connor) int ret; 203653324971SDavid Marchand int i; 203753324971SDavid Marchand 203853324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 203953324971SDavid Marchand 204053324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 204153324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 204253324971SDavid Marchand 204353324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 204453324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 204553324971SDavid Marchand fwd_stream_stats_display(sm_id); 204653324971SDavid Marchand } else { 204753324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 204853324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 204953324971SDavid Marchand } 205053324971SDavid Marchand 205153324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 205253324971SDavid Marchand 205353324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 205453324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 205553324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 205653324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2057d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2058d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 205953324971SDavid Marchand 2060bc700b67SDharmik Thakkar if (record_core_cycles) 206199a4974aSRobin Jarry fwd_cycles += fs->busy_cycles; 206253324971SDavid Marchand } 206353324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2064*c3fd1e60SFerruh Yigit uint64_t tx_dropped = 0; 2065*c3fd1e60SFerruh Yigit 206653324971SDavid Marchand pt_id = fwd_ports_ids[i]; 206753324971SDavid Marchand 
port = &ports[pt_id]; 206853324971SDavid Marchand 2069baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &stats); 2070baef6bbfSMin Hu (Connor) if (ret != 0) { 2071baef6bbfSMin Hu (Connor) fprintf(stderr, 2072baef6bbfSMin Hu (Connor) "%s: Error: failed to get stats (port %u): %d", 2073baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 2074baef6bbfSMin Hu (Connor) continue; 2075baef6bbfSMin Hu (Connor) } 207653324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 207753324971SDavid Marchand stats.opackets -= port->stats.opackets; 207853324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 207953324971SDavid Marchand stats.obytes -= port->stats.obytes; 208053324971SDavid Marchand stats.imissed -= port->stats.imissed; 208153324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 208253324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 208353324971SDavid Marchand 208453324971SDavid Marchand total_recv += stats.ipackets; 208553324971SDavid Marchand total_xmit += stats.opackets; 208653324971SDavid Marchand total_rx_dropped += stats.imissed; 2087*c3fd1e60SFerruh Yigit tx_dropped += ports_stats[pt_id].tx_dropped; 2088*c3fd1e60SFerruh Yigit tx_dropped += stats.oerrors; 2089*c3fd1e60SFerruh Yigit total_tx_dropped += tx_dropped; 209053324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 209153324971SDavid Marchand 209253324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 209353324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 209453324971SDavid Marchand 209508dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 209608dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 209753324971SDavid Marchand stats.ipackets + stats.imissed); 209853324971SDavid Marchand 2099d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 210053324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 210153324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 210253324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 210353324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 210453324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 210553324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2106d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2107d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2108d139cf23SLance Richardson } 210953324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 211008dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 211108dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 211253324971SDavid Marchand } 211353324971SDavid Marchand 211408dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 211553324971SDavid Marchand "TX-total: %-"PRIu64"\n", 2116*c3fd1e60SFerruh Yigit stats.opackets, tx_dropped, 2117*c3fd1e60SFerruh Yigit stats.opackets + tx_dropped); 211853324971SDavid Marchand 21190e4b1963SDharmik Thakkar if (record_burst_stats) { 212053324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 212153324971SDavid Marchand pkt_burst_stats_display("RX", 212253324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 212353324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 212453324971SDavid Marchand pkt_burst_stats_display("TX", 212553324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 21260e4b1963SDharmik Thakkar } 212753324971SDavid Marchand 212853324971SDavid Marchand printf(" 
%s--------------------------------%s\n", 212953324971SDavid Marchand fwd_stats_border, fwd_stats_border); 213053324971SDavid Marchand } 213153324971SDavid Marchand 213253324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 213353324971SDavid Marchand "%s\n", 213453324971SDavid Marchand acc_stats_border, acc_stats_border); 213553324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 213653324971SDavid Marchand "%-"PRIu64"\n" 213753324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 213853324971SDavid Marchand "%-"PRIu64"\n", 213953324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 214053324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 214153324971SDavid Marchand if (total_rx_nombuf > 0) 214253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 214353324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 214453324971SDavid Marchand "%s\n", 214553324971SDavid Marchand acc_stats_border, acc_stats_border); 2146bc700b67SDharmik Thakkar if (record_core_cycles) { 21474c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21483a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21493a164e00SPhil Yang uint64_t total_pkts = 0; 21503a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21513a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21523a164e00SPhil Yang total_pkts = total_xmit; 21533a164e00SPhil Yang else 21543a164e00SPhil Yang total_pkts = total_recv; 21553a164e00SPhil Yang 215699a4974aSRobin Jarry printf("\n CPU cycles/packet=%.2F (busy cycles=" 21573a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21584c0497b1SDharmik Thakkar " MHz Clock\n", 21593a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21603a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21614c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21623a164e00SPhil Yang } 2163bc700b67SDharmik Thakkar } 216453324971SDavid Marchand } 216553324971SDavid Marchand 216653324971SDavid Marchand void 216753324971SDavid Marchand fwd_stats_reset(void) 216853324971SDavid Marchand { 216953324971SDavid Marchand streamid_t sm_id; 217053324971SDavid Marchand portid_t pt_id; 2171baef6bbfSMin Hu (Connor) int ret; 217253324971SDavid Marchand int i; 217353324971SDavid Marchand 217453324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 217553324971SDavid Marchand pt_id = fwd_ports_ids[i]; 2176baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats); 2177baef6bbfSMin Hu (Connor) if (ret != 0) 2178baef6bbfSMin Hu (Connor) fprintf(stderr, 2179baef6bbfSMin Hu (Connor) "%s: Error: failed to clear stats (port %u):%d", 2180baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 218153324971SDavid Marchand } 218253324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 218353324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 218453324971SDavid Marchand 218553324971SDavid Marchand fs->rx_packets = 0; 218653324971SDavid Marchand fs->tx_packets = 0; 218753324971SDavid Marchand fs->fwd_dropped = 0; 218853324971SDavid Marchand fs->rx_bad_ip_csum = 0; 218953324971SDavid Marchand fs->rx_bad_l4_csum = 0; 219053324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2191d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 219253324971SDavid Marchand 219353324971SDavid Marchand 
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
219453324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
219599a4974aSRobin Jarry fs->busy_cycles = 0;
219653324971SDavid Marchand }
219753324971SDavid Marchand }
219853324971SDavid Marchand 
2199af75078fSIntel static void
22007741e4cfSIntel flush_fwd_rx_queues(void)
2201af75078fSIntel {
2202af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2203af75078fSIntel portid_t rxp;
22047741e4cfSIntel portid_t port_id;
2205af75078fSIntel queueid_t rxq;
2206af75078fSIntel uint16_t nb_rx;
2207af75078fSIntel uint16_t i;
2208af75078fSIntel uint8_t j;
2209f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2210594302c7SJames Poole uint64_t timer_period;
2211f487715fSReshma Pattan 
2212a550baf2SMin Hu (Connor) if (num_procs > 1) {
2213a550baf2SMin Hu (Connor) printf("multi-process does not support flushing fwd Rx queues, skipping\n");
2214a550baf2SMin Hu (Connor) return;
2215a550baf2SMin Hu (Connor) }
2216a550baf2SMin Hu (Connor) 
2217f487715fSReshma Pattan /* convert to number of cycles */
2218594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */
2219af75078fSIntel 
2220af75078fSIntel for (j = 0; j < 2; j++) {
22217741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2222af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
22237741e4cfSIntel port_id = fwd_ports_ids[rxp];
22243c4426dbSDmitry Kozlyuk 
22253c4426dbSDmitry Kozlyuk /* Polling stopped queues is prohibited. */
22263c4426dbSDmitry Kozlyuk if (ports[port_id].rxq[rxq].state ==
22273c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED)
22283c4426dbSDmitry Kozlyuk continue;
22293c4426dbSDmitry Kozlyuk 
2230f487715fSReshma Pattan /**
2231f487715fSReshma Pattan * testpmd can get stuck in the do/while loop below
2232f487715fSReshma Pattan * if rte_eth_rx_burst() keeps returning packets.
2233f487715fSReshma Pattan * A timer is therefore used to exit the loop
2234f487715fSReshma Pattan * after a 1 second timeout.
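 * The timeout is measured in TSC cycles (sketch of the logic below):
 *
 *   timer_period = rte_get_timer_hz();   // one second, in cycles
 *   ... poll while nb_rx > 0 && timer_tsc < timer_period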
2235f487715fSReshma Pattan */ 2236f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 2237af75078fSIntel do { 22387741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 2239013af9b6SIntel pkts_burst, MAX_PKT_BURST); 2240af75078fSIntel for (i = 0; i < nb_rx; i++) 2241af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 2242f487715fSReshma Pattan 2243f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 2244f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 2245f487715fSReshma Pattan timer_tsc += diff_tsc; 2246f487715fSReshma Pattan } while ((nb_rx > 0) && 2247f487715fSReshma Pattan (timer_tsc < timer_period)); 2248f487715fSReshma Pattan timer_tsc = 0; 2249af75078fSIntel } 2250af75078fSIntel } 2251af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 2252af75078fSIntel } 2253af75078fSIntel } 2254af75078fSIntel 2255af75078fSIntel static void 2256af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 2257af75078fSIntel { 2258af75078fSIntel struct fwd_stream **fsm; 225999a4974aSRobin Jarry uint64_t prev_tsc; 2260af75078fSIntel streamid_t nb_fs; 2261af75078fSIntel streamid_t sm_id; 2262a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 22637e4441c8SRemy Horton uint64_t tics_per_1sec; 22647e4441c8SRemy Horton uint64_t tics_datum; 22657e4441c8SRemy Horton uint64_t tics_current; 22664918a357SXiaoyun Li uint16_t i, cnt_ports; 2267af75078fSIntel 22684918a357SXiaoyun Li cnt_ports = nb_ports; 22697e4441c8SRemy Horton tics_datum = rte_rdtsc(); 22707e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 22717e4441c8SRemy Horton #endif 2272af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 2273af75078fSIntel nb_fs = fc->stream_nb; 227499a4974aSRobin Jarry prev_tsc = rte_rdtsc(); 2275af75078fSIntel do { 2276af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 22773c4426dbSDmitry Kozlyuk if (!fsm[sm_id]->disabled) 2278af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 2279a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 2280e25e6c70SRemy Horton if (bitrate_enabled != 0 && 2281e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 22827e4441c8SRemy Horton tics_current = rte_rdtsc(); 22837e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 22847e4441c8SRemy Horton /* Periodic bitrate calculation */ 22854918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2286e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 22874918a357SXiaoyun Li ports_ids[i]); 22887e4441c8SRemy Horton tics_datum = tics_current; 22897e4441c8SRemy Horton } 2290e25e6c70SRemy Horton } 22917e4441c8SRemy Horton #endif 2292a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 229365eb1e54SPablo de Lara if (latencystats_enabled != 0 && 229465eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 229562d3216dSReshma Pattan rte_latencystats_update(); 229662d3216dSReshma Pattan #endif 229799a4974aSRobin Jarry if (record_core_cycles) { 229899a4974aSRobin Jarry uint64_t tsc = rte_rdtsc(); 229962d3216dSReshma Pattan 230099a4974aSRobin Jarry fc->total_cycles += tsc - prev_tsc; 230199a4974aSRobin Jarry prev_tsc = tsc; 230299a4974aSRobin Jarry } 2303af75078fSIntel } while (! 
fc->stopped); 2304af75078fSIntel } 2305af75078fSIntel 2306af75078fSIntel static int 230799a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage) 230899a4974aSRobin Jarry { 230999a4974aSRobin Jarry struct fwd_stream **fsm; 231099a4974aSRobin Jarry struct fwd_lcore *fc; 231199a4974aSRobin Jarry streamid_t nb_fs; 231299a4974aSRobin Jarry streamid_t sm_id; 231399a4974aSRobin Jarry 231499a4974aSRobin Jarry fc = lcore_to_fwd_lcore(lcore_id); 231599a4974aSRobin Jarry if (fc == NULL) 231699a4974aSRobin Jarry return -1; 231799a4974aSRobin Jarry 231899a4974aSRobin Jarry fsm = &fwd_streams[fc->stream_idx]; 231999a4974aSRobin Jarry nb_fs = fc->stream_nb; 232099a4974aSRobin Jarry usage->busy_cycles = 0; 232199a4974aSRobin Jarry usage->total_cycles = fc->total_cycles; 232299a4974aSRobin Jarry 232399a4974aSRobin Jarry for (sm_id = 0; sm_id < nb_fs; sm_id++) { 232499a4974aSRobin Jarry if (!fsm[sm_id]->disabled) 232599a4974aSRobin Jarry usage->busy_cycles += fsm[sm_id]->busy_cycles; 232699a4974aSRobin Jarry } 232799a4974aSRobin Jarry 232899a4974aSRobin Jarry return 0; 232999a4974aSRobin Jarry } 233099a4974aSRobin Jarry 233199a4974aSRobin Jarry static int 2332af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2333af75078fSIntel { 2334af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2335af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2336af75078fSIntel return 0; 2337af75078fSIntel } 2338af75078fSIntel 2339af75078fSIntel /* 2340af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2341af75078fSIntel * Used to start communication flows in network loopback test configurations. 2342af75078fSIntel */ 2343af75078fSIntel static int 2344af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2345af75078fSIntel { 2346af75078fSIntel struct fwd_lcore *fwd_lc; 2347af75078fSIntel struct fwd_lcore tmp_lcore; 2348af75078fSIntel 2349af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2350af75078fSIntel tmp_lcore = *fwd_lc; 2351af75078fSIntel tmp_lcore.stopped = 1; 2352af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2353af75078fSIntel return 0; 2354af75078fSIntel } 2355af75078fSIntel 2356af75078fSIntel /* 2357af75078fSIntel * Launch packet forwarding: 2358af75078fSIntel * - Setup per-port forwarding context. 2359af75078fSIntel * - launch logical cores with their forwarding configuration. 2360af75078fSIntel */ 2361af75078fSIntel static void 2362af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2363af75078fSIntel { 2364af75078fSIntel unsigned int i; 2365af75078fSIntel unsigned int lc_id; 2366af75078fSIntel int diag; 2367af75078fSIntel 2368af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2369af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2370af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2371af75078fSIntel fwd_lcores[i]->stopped = 0; 2372af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2373af75078fSIntel fwd_lcores[i], lc_id); 2374af75078fSIntel if (diag != 0) 237561a3b0e5SAndrew Rybchenko fprintf(stderr, 237661a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2377af75078fSIntel lc_id, diag); 2378af75078fSIntel } 2379af75078fSIntel } 2380af75078fSIntel } 2381af75078fSIntel 2382af75078fSIntel /* 2383af75078fSIntel * Launch packet forwarding configuration. 
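 *
 * Typical entry points (illustrative): the testpmd "start" command
 * maps to start_packet_forwarding(0), while "start tx_first" maps to
 * start_packet_forwarding(1) to prime network loopback setups with
 * one TX-only burst per port before normal forwarding begins.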
2384af75078fSIntel */ 2385af75078fSIntel void 2386af75078fSIntel start_packet_forwarding(int with_tx_first) 2387af75078fSIntel { 2388af75078fSIntel port_fwd_begin_t port_fwd_begin; 2389af75078fSIntel port_fwd_end_t port_fwd_end; 23903c4426dbSDmitry Kozlyuk stream_init_t stream_init = cur_fwd_eng->stream_init; 2391af75078fSIntel unsigned int i; 2392af75078fSIntel 23935a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 23945a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 23955a8fb55cSReshma Pattan 23965a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 23975a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 23985a8fb55cSReshma Pattan 23995a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 24005a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 24015a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 24025a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 24035a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 24045a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 24055a8fb55cSReshma Pattan 2406ce8d5614SIntel if (all_ports_started() == 0) { 240761a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2408ce8d5614SIntel return; 2409ce8d5614SIntel } 2410af75078fSIntel if (test_done == 0) { 241161a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2412af75078fSIntel return; 2413af75078fSIntel } 24147741e4cfSIntel 241547a767b2SMatan Azrad fwd_config_setup(); 241647a767b2SMatan Azrad 241765744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 241865744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 241965744833SXueming Li return; 242065744833SXueming Li 24213c4426dbSDmitry Kozlyuk if (stream_init != NULL) 24223c4426dbSDmitry Kozlyuk for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) 24233c4426dbSDmitry Kozlyuk stream_init(fwd_streams[i]); 24243c4426dbSDmitry Kozlyuk 2425a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2426a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2427a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2428a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2429a78040c9SAlvin Zhang fprintf(stderr, 2430a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2431a78040c9SAlvin Zhang return; 2432a78040c9SAlvin Zhang } 2433a78040c9SAlvin Zhang } 2434a78040c9SAlvin Zhang } 2435a78040c9SAlvin Zhang 2436a78040c9SAlvin Zhang if (with_tx_first) { 2437a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2438a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2439a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2440a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2441a78040c9SAlvin Zhang fprintf(stderr, 2442a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2443a78040c9SAlvin Zhang return; 2444a78040c9SAlvin Zhang } 2445a78040c9SAlvin Zhang } 2446a78040c9SAlvin Zhang } 2447a78040c9SAlvin Zhang } 2448a78040c9SAlvin Zhang 2449a78040c9SAlvin Zhang test_done = 0; 2450a78040c9SAlvin Zhang 24517741e4cfSIntel if(!no_flush_rx) 24527741e4cfSIntel flush_fwd_rx_queues(); 24537741e4cfSIntel 2454af75078fSIntel rxtx_config_display(); 2455af75078fSIntel 245653324971SDavid Marchand fwd_stats_reset(); 2457af75078fSIntel if (with_tx_first) { 2458acbf77a6SZhihong Wang while (with_tx_first--) { 2459acbf77a6SZhihong Wang 
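/*
 * Illustrative note (added, not part of the upstream sources): with_tx_first
 * comes from the "start tx_first [n]" command. Each pass of the loop below
 * runs the tx-only engine for roughly one burst on every forwarding lcore,
 * so e.g.:
 *
 *   start_packet_forwarding(4);   (as if "start tx_first 4" had been typed)
 *
 * primes a loopback topology with four bursts before the configured engine
 * takes over.
 */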
launch_packet_forwarding( 2460acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2461af75078fSIntel rte_eal_mp_wait_lcore(); 2462acbf77a6SZhihong Wang } 2463af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2464af75078fSIntel if (port_fwd_end != NULL) { 2465af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2466af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2467af75078fSIntel } 2468af75078fSIntel } 2469af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2470af75078fSIntel } 2471af75078fSIntel 2472af75078fSIntel void 2473af75078fSIntel stop_packet_forwarding(void) 2474af75078fSIntel { 2475af75078fSIntel port_fwd_end_t port_fwd_end; 2476af75078fSIntel lcoreid_t lc_id; 247753324971SDavid Marchand portid_t pt_id; 247853324971SDavid Marchand int i; 2479af75078fSIntel 2480af75078fSIntel if (test_done) { 248161a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2482af75078fSIntel return; 2483af75078fSIntel } 2484af75078fSIntel printf("Telling cores to stop..."); 2485af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2486af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2487af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2488af75078fSIntel rte_eal_mp_wait_lcore(); 2489af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2490af75078fSIntel if (port_fwd_end != NULL) { 2491af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2492af75078fSIntel pt_id = fwd_ports_ids[i]; 2493af75078fSIntel (*port_fwd_end)(pt_id); 2494af75078fSIntel } 2495af75078fSIntel } 2496c185d42cSDavid Marchand 249753324971SDavid Marchand fwd_stats_display(); 249858d475b7SJerin Jacob 2499af75078fSIntel printf("\nDone.\n"); 2500af75078fSIntel test_done = 1; 2501af75078fSIntel } 2502af75078fSIntel 2503cfae07fdSOuyang Changchun void 2504cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 2505cfae07fdSOuyang Changchun { 2506492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 250761a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link up fail.\n"); 2508cfae07fdSOuyang Changchun } 2509cfae07fdSOuyang Changchun 2510cfae07fdSOuyang Changchun void 2511cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 2512cfae07fdSOuyang Changchun { 2513492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 251461a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link down fail.\n"); 2515cfae07fdSOuyang Changchun } 2516cfae07fdSOuyang Changchun 2517ce8d5614SIntel static int 2518ce8d5614SIntel all_ports_started(void) 2519ce8d5614SIntel { 2520ce8d5614SIntel portid_t pi; 2521ce8d5614SIntel struct rte_port *port; 2522ce8d5614SIntel 25237d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2524ce8d5614SIntel port = &ports[pi]; 2525ce8d5614SIntel /* Check if there is a port which is not started */ 252641b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 252741b05095SBernard Iremonger (port->slave_flag == 0)) 2528ce8d5614SIntel return 0; 2529ce8d5614SIntel } 2530ce8d5614SIntel 2531ce8d5614SIntel /* No port is not started */ 2532ce8d5614SIntel return 1; 2533ce8d5614SIntel } 2534ce8d5614SIntel 2535148f963fSBruce Richardson int 25366018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 25376018eb8cSShahaf Shuler { 25386018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 25396018eb8cSShahaf Shuler 25406018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 25416018eb8cSShahaf Shuler (port->slave_flag == 0)) 25426018eb8cSShahaf Shuler return 0; 25436018eb8cSShahaf Shuler 
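/*
 * Note added for clarity (not in the upstream sources): a bonding slave is
 * always reported as stopped here, whatever its real state, because its
 * lifecycle is driven by the bonding device; this keeps all_ports_stopped()
 * from blocking on ports the user cannot stop directly.
 */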
return 1; 25446018eb8cSShahaf Shuler } 25456018eb8cSShahaf Shuler 25466018eb8cSShahaf Shuler int 2547edab33b1STetsuya Mukawa all_ports_stopped(void) 2548edab33b1STetsuya Mukawa { 2549edab33b1STetsuya Mukawa portid_t pi; 2550edab33b1STetsuya Mukawa 25517d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 25526018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 2553edab33b1STetsuya Mukawa return 0; 2554edab33b1STetsuya Mukawa } 2555edab33b1STetsuya Mukawa 2556edab33b1STetsuya Mukawa return 1; 2557edab33b1STetsuya Mukawa } 2558edab33b1STetsuya Mukawa 2559edab33b1STetsuya Mukawa int 2560edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2561edab33b1STetsuya Mukawa { 2562edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2563edab33b1STetsuya Mukawa return 0; 2564edab33b1STetsuya Mukawa 2565edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2566edab33b1STetsuya Mukawa return 0; 2567edab33b1STetsuya Mukawa 2568edab33b1STetsuya Mukawa return 1; 2569edab33b1STetsuya Mukawa } 2570edab33b1STetsuya Mukawa 257123095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8) 257223095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9) 257323095155SDariusz Sosnowski 257423095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12) 257523095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13) 257623095155SDariusz Sosnowski 257723095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16) 257823095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17) 257923095155SDariusz Sosnowski 258023095155SDariusz Sosnowski 25811c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */ 25821c69df45SOri Kam static int 258301817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 25841c69df45SOri Kam { 25851c69df45SOri Kam queueid_t qi; 25861c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 25871c69df45SOri Kam .peer_count = 1, 25881c69df45SOri Kam }; 25891c69df45SOri Kam int i; 25901c69df45SOri Kam int diag; 25911c69df45SOri Kam struct rte_port *port = &ports[pi]; 259201817b10SBing Zhao uint16_t peer_rx_port = pi; 259301817b10SBing Zhao uint16_t peer_tx_port = pi; 259401817b10SBing Zhao uint32_t manual = 1; 259501817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 259623095155SDariusz Sosnowski uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY; 259723095155SDariusz Sosnowski uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY; 259823095155SDariusz Sosnowski uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY; 259923095155SDariusz Sosnowski uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY; 260023095155SDariusz Sosnowski uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY; 260123095155SDariusz Sosnowski uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY; 260201817b10SBing Zhao 260301817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 260401817b10SBing Zhao peer_rx_port = pi; 260501817b10SBing Zhao peer_tx_port = pi; 260601817b10SBing Zhao manual = 0; 260701817b10SBing Zhao } else if (hairpin_mode & 0x1) { 260801817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 260901817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 261001817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 261101817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 261201817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 
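/*
 * Illustrative example (added, derived from the surrounding code): with
 * hairpin_mode bit 0 set, each port's Tx hairpin queues pair with the Rx
 * queues of the previously probed port, and the first port wraps around to
 * the last one, forming a loop. For three ports 0, 1 and 2:
 *
 *   Tx(0) -> Rx(2), Tx(1) -> Rx(0), Tx(2) -> Rx(1)
 */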
261301817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) {
261401817b10SBing Zhao peer_rx_port = p_pi;
261501817b10SBing Zhao } else {
261601817b10SBing Zhao uint16_t next_pi;
261701817b10SBing Zhao
261801817b10SBing Zhao /* Last port will be the peer RX port of the first. */
261901817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi)
262001817b10SBing Zhao peer_rx_port = next_pi;
262101817b10SBing Zhao }
262201817b10SBing Zhao manual = 1;
262301817b10SBing Zhao } else if (hairpin_mode & 0x2) {
262401817b10SBing Zhao if (cnt_pi & 0x1) {
262501817b10SBing Zhao peer_rx_port = p_pi;
262601817b10SBing Zhao } else {
262701817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
262801817b10SBing Zhao RTE_ETH_DEV_NO_OWNER);
262901817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS)
263001817b10SBing Zhao peer_rx_port = pi;
263101817b10SBing Zhao }
263201817b10SBing Zhao peer_tx_port = peer_rx_port;
263301817b10SBing Zhao manual = 1;
263401817b10SBing Zhao }
26351c69df45SOri Kam
26361c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
263701817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port;
26381c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq;
263901817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
264001817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
264123095155SDariusz Sosnowski hairpin_conf.force_memory = !!tx_force_memory;
264223095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
264323095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!tx_rte_memory;
26441c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup
26451c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf);
26461c69df45SOri Kam i++;
26471c69df45SOri Kam if (diag == 0)
26481c69df45SOri Kam continue;
26491c69df45SOri Kam
26501c69df45SOri Kam /* Failed to set up Tx hairpin queue, return */
2651eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
2652eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
2653eac341d3SJoyce Kong else
265461a3b0e5SAndrew Rybchenko fprintf(stderr,
265561a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi);
265661a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n",
265761a3b0e5SAndrew Rybchenko pi);
26581c69df45SOri Kam /* try to reconfigure queues next time */
26591c69df45SOri Kam port->need_reconfig_queues = 1;
26601c69df45SOri Kam return -1;
26611c69df45SOri Kam }
26621c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
266301817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port;
26641c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq;
266501817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
266601817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
266723095155SDariusz Sosnowski hairpin_conf.force_memory = !!rx_force_memory;
266823095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
266923095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!rx_rte_memory;
26701c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup
26711c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf);
26721c69df45SOri Kam i++;
26731c69df45SOri Kam if (diag == 0)
26741c69df45SOri Kam continue;
26751c69df45SOri Kam
26761c69df45SOri Kam /* Failed to set up Rx hairpin queue, return */
2677eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
2678eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
2679eac341d3SJoyce Kong else
268061a3b0e5SAndrew Rybchenko fprintf(stderr,
268161a3b0e5SAndrew Rybchenko "Port %d can not be set
back to stopped\n", pi); 268261a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 268361a3b0e5SAndrew Rybchenko pi); 26841c69df45SOri Kam /* try to reconfigure queues next time */ 26851c69df45SOri Kam port->need_reconfig_queues = 1; 26861c69df45SOri Kam return -1; 26871c69df45SOri Kam } 26881c69df45SOri Kam return 0; 26891c69df45SOri Kam } 26901c69df45SOri Kam 26912befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 26922befc67fSViacheslav Ovsiienko int 26932befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 26942befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 26952befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 26962befc67fSViacheslav Ovsiienko { 26972befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 26984f04edcdSHanumanth Pothula struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; 26994f04edcdSHanumanth Pothula struct rte_mempool *mpx; 27002befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 270154a0f4d7SYuan Wang uint32_t prev_hdrs = 0; 27022befc67fSViacheslav Ovsiienko int ret; 27032befc67fSViacheslav Ovsiienko 27044f04edcdSHanumanth Pothula 2705a4bf5421SHanumanth Pothula if ((rx_pkt_nb_segs > 1) && 2706a4bf5421SHanumanth Pothula (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 27074f04edcdSHanumanth Pothula /* multi-segment configuration */ 27082befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 27092befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 27102befc67fSViacheslav Ovsiienko /* 27112befc67fSViacheslav Ovsiienko * Use last valid pool for the segments with number 27122befc67fSViacheslav Ovsiienko * exceeding the pool index. 27132befc67fSViacheslav Ovsiienko */ 27141108c33eSRaja Zidane mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 27152befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 27162befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 27172befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 27182befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 27192befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ? mpx : mp; 272052e2e7edSYuan Wang if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { 272154a0f4d7SYuan Wang rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs; 272254a0f4d7SYuan Wang prev_hdrs |= rx_seg->proto_hdr; 272352e2e7edSYuan Wang } else { 272452e2e7edSYuan Wang rx_seg->length = rx_pkt_seg_lengths[i] ? 
272552e2e7edSYuan Wang rx_pkt_seg_lengths[i] : 272652e2e7edSYuan Wang mbuf_data_size[mp_n]; 272752e2e7edSYuan Wang } 27282befc67fSViacheslav Ovsiienko } 27292befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 27302befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 2731a4bf5421SHanumanth Pothula rx_conf->rx_mempools = NULL; 2732a4bf5421SHanumanth Pothula rx_conf->rx_nmempool = 0; 2733a4bf5421SHanumanth Pothula ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2734a4bf5421SHanumanth Pothula socket_id, rx_conf, NULL); 2735a4bf5421SHanumanth Pothula rx_conf->rx_seg = NULL; 2736a4bf5421SHanumanth Pothula rx_conf->rx_nseg = 0; 2737a4bf5421SHanumanth Pothula } else if (multi_rx_mempool == 1) { 27384f04edcdSHanumanth Pothula /* multi-pool configuration */ 2739a4bf5421SHanumanth Pothula struct rte_eth_dev_info dev_info; 2740a4bf5421SHanumanth Pothula 2741a4bf5421SHanumanth Pothula if (mbuf_data_size_n <= 1) { 2742a4bf5421SHanumanth Pothula fprintf(stderr, "Invalid number of mempools %u\n", 2743a4bf5421SHanumanth Pothula mbuf_data_size_n); 2744a4bf5421SHanumanth Pothula return -EINVAL; 2745a4bf5421SHanumanth Pothula } 2746a4bf5421SHanumanth Pothula ret = rte_eth_dev_info_get(port_id, &dev_info); 2747a4bf5421SHanumanth Pothula if (ret != 0) 2748a4bf5421SHanumanth Pothula return ret; 2749a4bf5421SHanumanth Pothula if (dev_info.max_rx_mempools == 0) { 2750a4bf5421SHanumanth Pothula fprintf(stderr, 2751a4bf5421SHanumanth Pothula "Port %u doesn't support requested multi-rx-mempool configuration.\n", 2752a4bf5421SHanumanth Pothula port_id); 2753a4bf5421SHanumanth Pothula return -ENOTSUP; 2754a4bf5421SHanumanth Pothula } 27554f04edcdSHanumanth Pothula for (i = 0; i < mbuf_data_size_n; i++) { 27564f04edcdSHanumanth Pothula mpx = mbuf_pool_find(socket_id, i); 27574f04edcdSHanumanth Pothula rx_mempool[i] = mpx ? mpx : mp; 27584f04edcdSHanumanth Pothula } 27594f04edcdSHanumanth Pothula rx_conf->rx_mempools = rx_mempool; 27604f04edcdSHanumanth Pothula rx_conf->rx_nmempool = mbuf_data_size_n; 2761a4bf5421SHanumanth Pothula rx_conf->rx_seg = NULL; 2762a4bf5421SHanumanth Pothula rx_conf->rx_nseg = 0; 27632befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 27642befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 2765a4bf5421SHanumanth Pothula rx_conf->rx_mempools = NULL; 2766a4bf5421SHanumanth Pothula rx_conf->rx_nmempool = 0; 2767a4bf5421SHanumanth Pothula } else { 2768a4bf5421SHanumanth Pothula /* Single pool/segment configuration */ 27692befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 27702befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 27714f04edcdSHanumanth Pothula rx_conf->rx_mempools = NULL; 27724f04edcdSHanumanth Pothula rx_conf->rx_nmempool = 0; 2773a4bf5421SHanumanth Pothula ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2774a4bf5421SHanumanth Pothula socket_id, rx_conf, mp); 2775a4bf5421SHanumanth Pothula } 2776a4bf5421SHanumanth Pothula 27773c4426dbSDmitry Kozlyuk ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ? 
27783c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 27793c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 27802befc67fSViacheslav Ovsiienko return ret; 27812befc67fSViacheslav Ovsiienko } 27822befc67fSViacheslav Ovsiienko 278363b72657SIvan Ilchenko static int 278463b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 278563b72657SIvan Ilchenko { 278663b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 278763b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 278863b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 278963b72657SIvan Ilchenko 279063b72657SIvan Ilchenko if (xstats_display_num == 0) 279163b72657SIvan Ilchenko return 0; 279263b72657SIvan Ilchenko 279363b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 279463b72657SIvan Ilchenko if (*ids_supp == NULL) 279563b72657SIvan Ilchenko goto fail_ids_supp; 279663b72657SIvan Ilchenko 279763b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 279863b72657SIvan Ilchenko sizeof(**prev_values)); 279963b72657SIvan Ilchenko if (*prev_values == NULL) 280063b72657SIvan Ilchenko goto fail_prev_values; 280163b72657SIvan Ilchenko 280263b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 280363b72657SIvan Ilchenko sizeof(**curr_values)); 280463b72657SIvan Ilchenko if (*curr_values == NULL) 280563b72657SIvan Ilchenko goto fail_curr_values; 280663b72657SIvan Ilchenko 280763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 280863b72657SIvan Ilchenko 280963b72657SIvan Ilchenko return 0; 281063b72657SIvan Ilchenko 281163b72657SIvan Ilchenko fail_curr_values: 281263b72657SIvan Ilchenko free(*prev_values); 281363b72657SIvan Ilchenko fail_prev_values: 281463b72657SIvan Ilchenko free(*ids_supp); 281563b72657SIvan Ilchenko fail_ids_supp: 281663b72657SIvan Ilchenko return -ENOMEM; 281763b72657SIvan Ilchenko } 281863b72657SIvan Ilchenko 281963b72657SIvan Ilchenko static void 282063b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 282163b72657SIvan Ilchenko { 282263b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 282363b72657SIvan Ilchenko return; 282463b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 282563b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 282663b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 282763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 282863b72657SIvan Ilchenko } 282963b72657SIvan Ilchenko 283063b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
*/
283163b72657SIvan Ilchenko static void
283263b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
283363b72657SIvan Ilchenko {
283463b72657SIvan Ilchenko unsigned int stat, stat_supp;
283563b72657SIvan Ilchenko const char *xstat_name;
283663b72657SIvan Ilchenko struct rte_port *port;
283763b72657SIvan Ilchenko uint64_t *ids_supp;
283863b72657SIvan Ilchenko int rc;
283963b72657SIvan Ilchenko
284063b72657SIvan Ilchenko if (xstats_display_num == 0)
284163b72657SIvan Ilchenko return;
284263b72657SIvan Ilchenko
284363b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) {
284463b72657SIvan Ilchenko fill_xstats_display_info();
284563b72657SIvan Ilchenko return;
284663b72657SIvan Ilchenko }
284763b72657SIvan Ilchenko
284863b72657SIvan Ilchenko port = &ports[pi];
284963b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED)
285063b72657SIvan Ilchenko return;
285163b72657SIvan Ilchenko
285263b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
285363b72657SIvan Ilchenko rte_exit(EXIT_FAILURE,
285463b72657SIvan Ilchenko "Failed to allocate xstats display memory\n");
285563b72657SIvan Ilchenko
285663b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp;
285763b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
285863b72657SIvan Ilchenko xstat_name = xstats_display[stat].name;
285963b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
286063b72657SIvan Ilchenko ids_supp + stat_supp);
286163b72657SIvan Ilchenko if (rc != 0) {
286263b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it (index %u)\n",
286363b72657SIvan Ilchenko xstat_name, pi, stat);
286463b72657SIvan Ilchenko continue;
286563b72657SIvan Ilchenko }
286663b72657SIvan Ilchenko stat_supp++;
286763b72657SIvan Ilchenko }
286863b72657SIvan Ilchenko
286963b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp;
287063b72657SIvan Ilchenko }
287163b72657SIvan Ilchenko
287263b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
287363b72657SIvan Ilchenko static void
287463b72657SIvan Ilchenko fill_xstats_display_info(void)
287563b72657SIvan Ilchenko {
287663b72657SIvan Ilchenko portid_t pi;
287763b72657SIvan Ilchenko
287863b72657SIvan Ilchenko if (xstats_display_num == 0)
287963b72657SIvan Ilchenko return;
288063b72657SIvan Ilchenko
288163b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi)
288263b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi);
288363b72657SIvan Ilchenko }
288463b72657SIvan Ilchenko
28857c06f1abSHuisong Li /*
28867c06f1abSHuisong Li * Some capabilities (like rx_offload_capa and tx_offload_capa) of a bonding
28877c06f1abSHuisong Li * device in dev_info are zero when no slave is added, and they are updated
28887c06f1abSHuisong Li * when a new slave device is added. So adding a slave device needs to update
28897c06f1abSHuisong Li * the port configuration of the bonding device.
28907c06f1abSHuisong Li */ 28917c06f1abSHuisong Li static void 28927c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid) 28937c06f1abSHuisong Li { 28947c06f1abSHuisong Li #ifdef RTE_NET_BOND 28957c06f1abSHuisong Li struct rte_port *port = &ports[bond_pid]; 28967c06f1abSHuisong Li uint16_t i; 28977c06f1abSHuisong Li int ret; 28987c06f1abSHuisong Li 28997c06f1abSHuisong Li ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info); 29007c06f1abSHuisong Li if (ret != 0) { 29017c06f1abSHuisong Li fprintf(stderr, "Failed to get dev info for port = %u\n", 29027c06f1abSHuisong Li bond_pid); 29037c06f1abSHuisong Li return; 29047c06f1abSHuisong Li } 29057c06f1abSHuisong Li 29067c06f1abSHuisong Li if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 29077c06f1abSHuisong Li port->dev_conf.txmode.offloads |= 29087c06f1abSHuisong Li RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 29097c06f1abSHuisong Li /* Apply Tx offloads configuration */ 29107c06f1abSHuisong Li for (i = 0; i < port->dev_info.max_tx_queues; i++) 29117c06f1abSHuisong Li port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 29127c06f1abSHuisong Li 29137c06f1abSHuisong Li port->dev_conf.rx_adv_conf.rss_conf.rss_hf &= 29147c06f1abSHuisong Li port->dev_info.flow_type_rss_offloads; 29157c06f1abSHuisong Li #else 29167c06f1abSHuisong Li RTE_SET_USED(bond_pid); 29177c06f1abSHuisong Li #endif 29187c06f1abSHuisong Li } 29197c06f1abSHuisong Li 2920edab33b1STetsuya Mukawa int 2921ce8d5614SIntel start_port(portid_t pid) 2922ce8d5614SIntel { 292392d2703eSMichael Qiu int diag, need_check_link_status = -1; 2924ce8d5614SIntel portid_t pi; 292501817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 292601817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 292701817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 292801817b10SBing Zhao uint16_t cnt_pi = 0; 292901817b10SBing Zhao uint16_t cfg_pi = 0; 293001817b10SBing Zhao int peer_pi; 2931ce8d5614SIntel queueid_t qi; 2932ce8d5614SIntel struct rte_port *port; 29331c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2934ce8d5614SIntel 29354468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 29364468635fSMichael Qiu return 0; 29374468635fSMichael Qiu 29387d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2939edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2940ce8d5614SIntel continue; 2941ce8d5614SIntel 2942d8c079a5SMin Hu (Connor) if (port_is_bonding_slave(pi)) { 2943d8c079a5SMin Hu (Connor) fprintf(stderr, 2944d8c079a5SMin Hu (Connor) "Please remove port %d from bonded device.\n", 2945d8c079a5SMin Hu (Connor) pi); 2946d8c079a5SMin Hu (Connor) continue; 2947d8c079a5SMin Hu (Connor) } 2948d8c079a5SMin Hu (Connor) 294992d2703eSMichael Qiu need_check_link_status = 0; 2950ce8d5614SIntel port = &ports[pi]; 2951eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STOPPED) 2952eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 2953eac341d3SJoyce Kong else { 295461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2955ce8d5614SIntel continue; 2956ce8d5614SIntel } 2957ce8d5614SIntel 2958ce8d5614SIntel if (port->need_reconfig > 0) { 2959655eae01SJie Wang struct rte_eth_conf dev_conf; 2960655eae01SJie Wang int k; 2961655eae01SJie Wang 2962ce8d5614SIntel port->need_reconfig = 0; 2963ce8d5614SIntel 29647ee3e944SVasily Philipov if (flow_isolate_all) { 29657ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 29667ee3e944SVasily Philipov if (ret) { 296761a3b0e5SAndrew Rybchenko fprintf(stderr, 296861a3b0e5SAndrew Rybchenko "Failed 
to apply isolated mode on port %d\n", 296961a3b0e5SAndrew Rybchenko pi); 29707ee3e944SVasily Philipov return -1; 29717ee3e944SVasily Philipov } 29727ee3e944SVasily Philipov } 2973b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 29745706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 297520a0286fSLiu Xiaofeng port->socket_id); 29761c69df45SOri Kam if (nb_hairpinq > 0 && 29771c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 297861a3b0e5SAndrew Rybchenko fprintf(stderr, 297961a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 298061a3b0e5SAndrew Rybchenko pi); 29811c69df45SOri Kam return -1; 29821c69df45SOri Kam } 29831bb4a528SFerruh Yigit 29847c06f1abSHuisong Li if (port->bond_flag == 1 && port->update_conf == 1) { 29857c06f1abSHuisong Li update_bonding_port_dev_conf(pi); 29867c06f1abSHuisong Li port->update_conf = 0; 29877c06f1abSHuisong Li } 29887c06f1abSHuisong Li 2989ce8d5614SIntel /* configure port */ 2990a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 29911c69df45SOri Kam nb_txq + nb_hairpinq, 2992ce8d5614SIntel &(port->dev_conf)); 2993ce8d5614SIntel if (diag != 0) { 2994eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2995eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2996eac341d3SJoyce Kong else 299761a3b0e5SAndrew Rybchenko fprintf(stderr, 299861a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 299961a3b0e5SAndrew Rybchenko pi); 300061a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 300161a3b0e5SAndrew Rybchenko pi); 3002ce8d5614SIntel /* try to reconfigure port next time */ 3003ce8d5614SIntel port->need_reconfig = 1; 3004148f963fSBruce Richardson return -1; 3005ce8d5614SIntel } 3006655eae01SJie Wang /* get device configuration*/ 3007655eae01SJie Wang if (0 != 3008655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 3009655eae01SJie Wang fprintf(stderr, 3010655eae01SJie Wang "port %d can not get device configuration\n", 3011655eae01SJie Wang pi); 3012655eae01SJie Wang return -1; 3013655eae01SJie Wang } 3014655eae01SJie Wang /* Apply Rx offloads configuration */ 3015655eae01SJie Wang if (dev_conf.rxmode.offloads != 3016655eae01SJie Wang port->dev_conf.rxmode.offloads) { 3017655eae01SJie Wang port->dev_conf.rxmode.offloads |= 3018655eae01SJie Wang dev_conf.rxmode.offloads; 3019655eae01SJie Wang for (k = 0; 3020655eae01SJie Wang k < port->dev_info.max_rx_queues; 3021655eae01SJie Wang k++) 30223c4426dbSDmitry Kozlyuk port->rxq[k].conf.offloads |= 3023655eae01SJie Wang dev_conf.rxmode.offloads; 3024655eae01SJie Wang } 3025655eae01SJie Wang /* Apply Tx offloads configuration */ 3026655eae01SJie Wang if (dev_conf.txmode.offloads != 3027655eae01SJie Wang port->dev_conf.txmode.offloads) { 3028655eae01SJie Wang port->dev_conf.txmode.offloads |= 3029655eae01SJie Wang dev_conf.txmode.offloads; 3030655eae01SJie Wang for (k = 0; 3031655eae01SJie Wang k < port->dev_info.max_tx_queues; 3032655eae01SJie Wang k++) 30333c4426dbSDmitry Kozlyuk port->txq[k].conf.offloads |= 3034655eae01SJie Wang dev_conf.txmode.offloads; 3035655eae01SJie Wang } 3036ce8d5614SIntel } 3037a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 3038ce8d5614SIntel port->need_reconfig_queues = 0; 3039ce8d5614SIntel /* setup tx queues */ 3040ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 30413c4426dbSDmitry Kozlyuk struct rte_eth_txconf *conf = 30423c4426dbSDmitry Kozlyuk &port->txq[qi].conf; 30433c4426dbSDmitry Kozlyuk 
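/*
 * Note (added illustration): a queue whose rte_eth_txconf has
 * tx_deferred_start set is left in RTE_ETH_QUEUE_STATE_STOPPED by
 * rte_eth_dev_start() and must be started explicitly later, e.g.:
 *
 *   conf->tx_deferred_start = 1;
 *   ... rte_eth_dev_start(pi) ...
 *   rte_eth_dev_tx_queue_start(pi, qi);
 */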
3044b6ea6408SIntel if ((numa_support) && 3045b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 3046b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 3047d44f8a48SQi Zhang port->nb_tx_desc[qi], 3048d44f8a48SQi Zhang txring_numa[pi], 30493c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 3050b6ea6408SIntel else 3051b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 3052d44f8a48SQi Zhang port->nb_tx_desc[qi], 3053d44f8a48SQi Zhang port->socket_id, 30543c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 3055b6ea6408SIntel 30563c4426dbSDmitry Kozlyuk if (diag == 0) { 30573c4426dbSDmitry Kozlyuk port->txq[qi].state = 30583c4426dbSDmitry Kozlyuk conf->tx_deferred_start ? 30593c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 30603c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 3061ce8d5614SIntel continue; 30623c4426dbSDmitry Kozlyuk } 3063ce8d5614SIntel 3064ce8d5614SIntel /* Fail to setup tx queue, return */ 3065eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3066eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3067eac341d3SJoyce Kong else 306861a3b0e5SAndrew Rybchenko fprintf(stderr, 306961a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 307061a3b0e5SAndrew Rybchenko pi); 307161a3b0e5SAndrew Rybchenko fprintf(stderr, 307261a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 3073d44f8a48SQi Zhang pi); 3074ce8d5614SIntel /* try to reconfigure queues next time */ 3075ce8d5614SIntel port->need_reconfig_queues = 1; 3076148f963fSBruce Richardson return -1; 3077ce8d5614SIntel } 3078ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 3079d44f8a48SQi Zhang /* setup rx queues */ 3080b6ea6408SIntel if ((numa_support) && 3081b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 3082b6ea6408SIntel struct rte_mempool * mp = 308326cbb419SViacheslav Ovsiienko mbuf_pool_find 308426cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 3085b6ea6408SIntel if (mp == NULL) { 308661a3b0e5SAndrew Rybchenko fprintf(stderr, 308761a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 3088b6ea6408SIntel rxring_numa[pi]); 3089148f963fSBruce Richardson return -1; 3090b6ea6408SIntel } 3091b6ea6408SIntel 30922befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 3093d4930794SFerruh Yigit port->nb_rx_desc[qi], 3094d44f8a48SQi Zhang rxring_numa[pi], 30953c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 3096d44f8a48SQi Zhang mp); 30971e1d6bddSBernard Iremonger } else { 30981e1d6bddSBernard Iremonger struct rte_mempool *mp = 309926cbb419SViacheslav Ovsiienko mbuf_pool_find 310026cbb419SViacheslav Ovsiienko (port->socket_id, 0); 31011e1d6bddSBernard Iremonger if (mp == NULL) { 310261a3b0e5SAndrew Rybchenko fprintf(stderr, 310361a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 31041e1d6bddSBernard Iremonger port->socket_id); 31051e1d6bddSBernard Iremonger return -1; 3106b6ea6408SIntel } 31072befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 3108d4930794SFerruh Yigit port->nb_rx_desc[qi], 3109d44f8a48SQi Zhang port->socket_id, 31103c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 3111d44f8a48SQi Zhang mp); 31121e1d6bddSBernard Iremonger } 3113ce8d5614SIntel if (diag == 0) 3114ce8d5614SIntel continue; 3115ce8d5614SIntel 3116ce8d5614SIntel /* Fail to setup rx queue, return */ 3117eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3118eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3119eac341d3SJoyce Kong else 312061a3b0e5SAndrew Rybchenko fprintf(stderr, 
312161a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n",
312261a3b0e5SAndrew Rybchenko pi);
312361a3b0e5SAndrew Rybchenko fprintf(stderr,
312461a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n",
3125d44f8a48SQi Zhang pi);
3126ce8d5614SIntel /* try to reconfigure queues next time */
3127ce8d5614SIntel port->need_reconfig_queues = 1;
3128148f963fSBruce Richardson return -1;
3129ce8d5614SIntel }
31301c69df45SOri Kam /* setup hairpin queues */
313101817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
31321c69df45SOri Kam return -1;
3133ce8d5614SIntel }
3134b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level);
3135b0a9354aSPavan Nikhilesh if (clear_ptypes) {
3136b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3137b0a9354aSPavan Nikhilesh NULL, 0);
3138b0a9354aSPavan Nikhilesh if (diag < 0)
313961a3b0e5SAndrew Rybchenko fprintf(stderr,
3140b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n",
3141b0a9354aSPavan Nikhilesh pi);
3142b0a9354aSPavan Nikhilesh }
3143b0a9354aSPavan Nikhilesh
314401817b10SBing Zhao p_pi = pi;
314501817b10SBing Zhao cnt_pi++;
314601817b10SBing Zhao
3147ce8d5614SIntel /* start port */
3148a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi);
314952f2c6f2SAndrew Rybchenko if (diag < 0) {
315061a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to start port %d: %s\n",
315161a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag));
3152ce8d5614SIntel
3153ce8d5614SIntel /* Failed to start port, set it back to stopped and move on */
3154eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
3155eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
3156eac341d3SJoyce Kong else
315761a3b0e5SAndrew Rybchenko fprintf(stderr,
315861a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n",
315961a3b0e5SAndrew Rybchenko pi);
3160ce8d5614SIntel continue;
3161ce8d5614SIntel }
3162ce8d5614SIntel
3163eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
3164eac341d3SJoyce Kong port->port_status = RTE_PORT_STARTED;
3165eac341d3SJoyce Kong else
316661a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into started\n",
316761a3b0e5SAndrew Rybchenko pi);
3168ce8d5614SIntel
31695ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3170c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3171a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3172d8c89163SZijie Pan
3173ce8d5614SIntel /* at least one port started, need checking link status */
3174ce8d5614SIntel need_check_link_status = 1;
317501817b10SBing Zhao
317601817b10SBing Zhao pl[cfg_pi++] = pi;
3177ce8d5614SIntel }
3178ce8d5614SIntel
317992d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check)
3180edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL);
318192d2703eSMichael Qiu else if (need_check_link_status == 0)
318261a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n");
3183ce8d5614SIntel
318401817b10SBing Zhao if (hairpin_mode & 0xf) {
318501817b10SBing Zhao uint16_t i;
318601817b10SBing Zhao int j;
318701817b10SBing Zhao
318801817b10SBing Zhao /* bind all started hairpin ports */
318901817b10SBing Zhao for (i = 0; i < cfg_pi; i++) {
319001817b10SBing Zhao pi = pl[i];
319101817b10SBing Zhao /* bind current Tx to all peer Rx */
319201817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
319301817b10SBing Zhao RTE_MAX_ETHPORTS, 1);
319401817b10SBing Zhao if (peer_pi < 0)
319501817b10SBing Zhao
return peer_pi; 319601817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 319701817b10SBing Zhao if (!port_is_started(peer_pl[j])) 319801817b10SBing Zhao continue; 319901817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 320001817b10SBing Zhao if (diag < 0) { 320161a3b0e5SAndrew Rybchenko fprintf(stderr, 320261a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 320301817b10SBing Zhao pi, peer_pl[j], 320401817b10SBing Zhao rte_strerror(-diag)); 320501817b10SBing Zhao return -1; 320601817b10SBing Zhao } 320701817b10SBing Zhao } 320801817b10SBing Zhao /* bind all peer Tx to current Rx */ 320901817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 321001817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 321101817b10SBing Zhao if (peer_pi < 0) 321201817b10SBing Zhao return peer_pi; 321301817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 321401817b10SBing Zhao if (!port_is_started(peer_pl[j])) 321501817b10SBing Zhao continue; 321601817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 321701817b10SBing Zhao if (diag < 0) { 321861a3b0e5SAndrew Rybchenko fprintf(stderr, 321961a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 322001817b10SBing Zhao peer_pl[j], pi, 322101817b10SBing Zhao rte_strerror(-diag)); 322201817b10SBing Zhao return -1; 322301817b10SBing Zhao } 322401817b10SBing Zhao } 322501817b10SBing Zhao } 322601817b10SBing Zhao } 322701817b10SBing Zhao 322863b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 322963b72657SIvan Ilchenko 3230ce8d5614SIntel printf("Done\n"); 3231148f963fSBruce Richardson return 0; 3232ce8d5614SIntel } 3233ce8d5614SIntel 3234ce8d5614SIntel void 3235ce8d5614SIntel stop_port(portid_t pid) 3236ce8d5614SIntel { 3237ce8d5614SIntel portid_t pi; 3238ce8d5614SIntel struct rte_port *port; 3239ce8d5614SIntel int need_check_link_status = 0; 324001817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 324101817b10SBing Zhao int peer_pi; 324247a4e1fbSDariusz Sosnowski int ret; 3243ce8d5614SIntel 32444468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 32454468635fSMichael Qiu return; 32464468635fSMichael Qiu 3247ce8d5614SIntel printf("Stopping ports...\n"); 3248ce8d5614SIntel 32497d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 32504468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3251ce8d5614SIntel continue; 3252ce8d5614SIntel 3253a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 325461a3b0e5SAndrew Rybchenko fprintf(stderr, 325561a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 325661a3b0e5SAndrew Rybchenko pi); 3257a8ef3e3aSBernard Iremonger continue; 3258a8ef3e3aSBernard Iremonger } 3259a8ef3e3aSBernard Iremonger 32600e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 326161a3b0e5SAndrew Rybchenko fprintf(stderr, 326261a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 326361a3b0e5SAndrew Rybchenko pi); 32640e545d30SBernard Iremonger continue; 32650e545d30SBernard Iremonger } 32660e545d30SBernard Iremonger 3267ce8d5614SIntel port = &ports[pi]; 3268eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3269eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3270eac341d3SJoyce Kong else 3271ce8d5614SIntel continue; 3272ce8d5614SIntel 327301817b10SBing Zhao if (hairpin_mode & 0xf) { 327401817b10SBing Zhao int j; 327501817b10SBing Zhao 327601817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 327701817b10SBing Zhao /* unbind all peer Tx from 
current Rx */ 327801817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 327901817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 328001817b10SBing Zhao if (peer_pi < 0) 328101817b10SBing Zhao continue; 328201817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 328301817b10SBing Zhao if (!port_is_started(peer_pl[j])) 328401817b10SBing Zhao continue; 328501817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 328601817b10SBing Zhao } 328701817b10SBing Zhao } 328801817b10SBing Zhao 3289543df472SChengwen Feng if (port->flow_list && !no_flow_flush) 32900f93edbfSGregory Etelson port_flow_flush(pi); 32910f93edbfSGregory Etelson 329247a4e1fbSDariusz Sosnowski ret = eth_dev_stop_mp(pi); 329347a4e1fbSDariusz Sosnowski if (ret != 0) { 3294e62c5a12SIvan Ilchenko RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3295e62c5a12SIvan Ilchenko pi); 329647a4e1fbSDariusz Sosnowski /* Allow to retry stopping the port. */ 329747a4e1fbSDariusz Sosnowski port->port_status = RTE_PORT_STARTED; 329847a4e1fbSDariusz Sosnowski continue; 329947a4e1fbSDariusz Sosnowski } 3300ce8d5614SIntel 3301eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3302eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3303eac341d3SJoyce Kong else 330461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 330561a3b0e5SAndrew Rybchenko pi); 3306ce8d5614SIntel need_check_link_status = 1; 3307ce8d5614SIntel } 3308bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3309edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3310ce8d5614SIntel 3311ce8d5614SIntel printf("Done\n"); 3312ce8d5614SIntel } 3313ce8d5614SIntel 3314ce6959bfSWisam Jaddo static void 33154f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3316ce6959bfSWisam Jaddo { 33174f1de450SThomas Monjalon portid_t i; 33184f1de450SThomas Monjalon portid_t new_total = 0; 3319ce6959bfSWisam Jaddo 33204f1de450SThomas Monjalon for (i = 0; i < *total; i++) 33214f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 33224f1de450SThomas Monjalon array[new_total] = array[i]; 33234f1de450SThomas Monjalon new_total++; 3324ce6959bfSWisam Jaddo } 33254f1de450SThomas Monjalon *total = new_total; 33264f1de450SThomas Monjalon } 33274f1de450SThomas Monjalon 33284f1de450SThomas Monjalon static void 33294f1de450SThomas Monjalon remove_invalid_ports(void) 33304f1de450SThomas Monjalon { 33314f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 33324f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 33334f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3334ce6959bfSWisam Jaddo } 3335ce6959bfSWisam Jaddo 33363889a322SHuisong Li static void 33374b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi) 33384b27989dSDmitry Kozlyuk { 33394b27989dSDmitry Kozlyuk mcast_addr_pool_destroy(pi); 33404b27989dSDmitry Kozlyuk port_flow_flush(pi); 33414b27989dSDmitry Kozlyuk port_flex_item_flush(pi); 33426d736e05SSuanming Mou port_flow_template_table_flush(pi); 33436d736e05SSuanming Mou port_flow_pattern_template_flush(pi); 33446d736e05SSuanming Mou port_flow_actions_template_flush(pi); 33454b27989dSDmitry Kozlyuk port_action_handle_flush(pi); 33464b27989dSDmitry Kozlyuk } 33474b27989dSDmitry Kozlyuk 33484b27989dSDmitry Kozlyuk static void 33493889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves) 33503889a322SHuisong Li { 33513889a322SHuisong Li struct rte_port *port; 33523889a322SHuisong Li 
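/*
 * Note added for clarity (not in the upstream sources): cl_quit is set by
 * the interactive "quit" command and f_quit by the signal handler, so the
 * rte_eth_dev_close() below runs only when testpmd itself is exiting, not
 * on an ordinary "port close" of the bonding device.
 */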
portid_t slave_pid; 33533889a322SHuisong Li uint16_t i; 33543889a322SHuisong Li 33553889a322SHuisong Li for (i = 0; i < num_slaves; i++) { 33563889a322SHuisong Li slave_pid = slave_pids[i]; 33573889a322SHuisong Li if (port_is_started(slave_pid) == 1) { 33583889a322SHuisong Li if (rte_eth_dev_stop(slave_pid) != 0) 33593889a322SHuisong Li fprintf(stderr, "rte_eth_dev_stop failed for port %u\n", 33603889a322SHuisong Li slave_pid); 33613889a322SHuisong Li 33623889a322SHuisong Li port = &ports[slave_pid]; 33633889a322SHuisong Li port->port_status = RTE_PORT_STOPPED; 33643889a322SHuisong Li } 33653889a322SHuisong Li 33663889a322SHuisong Li clear_port_slave_flag(slave_pid); 33673889a322SHuisong Li 33683889a322SHuisong Li /* Close slave device when testpmd quit or is killed. */ 33693889a322SHuisong Li if (cl_quit == 1 || f_quit == 1) 33703889a322SHuisong Li rte_eth_dev_close(slave_pid); 33713889a322SHuisong Li } 33723889a322SHuisong Li } 33733889a322SHuisong Li 3374ce8d5614SIntel void 3375ce8d5614SIntel close_port(portid_t pid) 3376ce8d5614SIntel { 3377ce8d5614SIntel portid_t pi; 3378ce8d5614SIntel struct rte_port *port; 33793889a322SHuisong Li portid_t slave_pids[RTE_MAX_ETHPORTS]; 33803889a322SHuisong Li int num_slaves = 0; 3381ce8d5614SIntel 33824468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 33834468635fSMichael Qiu return; 33844468635fSMichael Qiu 3385ce8d5614SIntel printf("Closing ports...\n"); 3386ce8d5614SIntel 33877d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 33884468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3389ce8d5614SIntel continue; 3390ce8d5614SIntel 3391a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 339261a3b0e5SAndrew Rybchenko fprintf(stderr, 339361a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 339461a3b0e5SAndrew Rybchenko pi); 3395a8ef3e3aSBernard Iremonger continue; 3396a8ef3e3aSBernard Iremonger } 3397a8ef3e3aSBernard Iremonger 33980e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 339961a3b0e5SAndrew Rybchenko fprintf(stderr, 340061a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 340161a3b0e5SAndrew Rybchenko pi); 34020e545d30SBernard Iremonger continue; 34030e545d30SBernard Iremonger } 34040e545d30SBernard Iremonger 3405ce8d5614SIntel port = &ports[pi]; 3406eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 340761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3408d4e8ad64SMichael Qiu continue; 3409d4e8ad64SMichael Qiu } 3410d4e8ad64SMichael Qiu 3411a550baf2SMin Hu (Connor) if (is_proc_primary()) { 34124b27989dSDmitry Kozlyuk flush_port_owned_resources(pi); 34133889a322SHuisong Li #ifdef RTE_NET_BOND 34143889a322SHuisong Li if (port->bond_flag == 1) 34153889a322SHuisong Li num_slaves = rte_eth_bond_slaves_get(pi, 34163889a322SHuisong Li slave_pids, RTE_MAX_ETHPORTS); 34173889a322SHuisong Li #endif 3418ce8d5614SIntel rte_eth_dev_close(pi); 34193889a322SHuisong Li /* 34203889a322SHuisong Li * If this port is bonded device, all slaves under the 34213889a322SHuisong Li * device need to be removed or closed. 
34223889a322SHuisong Li */ 34233889a322SHuisong Li if (port->bond_flag == 1 && num_slaves > 0) 34243889a322SHuisong Li clear_bonding_slave_device(slave_pids, 34253889a322SHuisong Li num_slaves); 3426ce8d5614SIntel } 342763b72657SIvan Ilchenko 342863b72657SIvan Ilchenko free_xstats_display_info(pi); 3429a550baf2SMin Hu (Connor) } 3430ce8d5614SIntel 343185c6571cSThomas Monjalon remove_invalid_ports(); 3432ce8d5614SIntel printf("Done\n"); 3433ce8d5614SIntel } 3434ce8d5614SIntel 3435edab33b1STetsuya Mukawa void 343697f1e196SWei Dai reset_port(portid_t pid) 343797f1e196SWei Dai { 343897f1e196SWei Dai int diag; 343997f1e196SWei Dai portid_t pi; 344097f1e196SWei Dai struct rte_port *port; 344197f1e196SWei Dai 344297f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 344397f1e196SWei Dai return; 344497f1e196SWei Dai 34451cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 34461cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 344761a3b0e5SAndrew Rybchenko fprintf(stderr, 344861a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 34491cde1b9aSShougang Wang return; 34501cde1b9aSShougang Wang } 34511cde1b9aSShougang Wang 345297f1e196SWei Dai printf("Resetting ports...\n"); 345397f1e196SWei Dai 345497f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 345597f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 345697f1e196SWei Dai continue; 345797f1e196SWei Dai 345897f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 345961a3b0e5SAndrew Rybchenko fprintf(stderr, 346061a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 346161a3b0e5SAndrew Rybchenko pi); 346297f1e196SWei Dai continue; 346397f1e196SWei Dai } 346497f1e196SWei Dai 346597f1e196SWei Dai if (port_is_bonding_slave(pi)) { 346661a3b0e5SAndrew Rybchenko fprintf(stderr, 346761a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 346897f1e196SWei Dai pi); 346997f1e196SWei Dai continue; 347097f1e196SWei Dai } 347197f1e196SWei Dai 3472e9351eaaSQiming Yang if (is_proc_primary()) { 347397f1e196SWei Dai diag = rte_eth_dev_reset(pi); 347497f1e196SWei Dai if (diag == 0) { 347597f1e196SWei Dai port = &ports[pi]; 347697f1e196SWei Dai port->need_reconfig = 1; 347797f1e196SWei Dai port->need_reconfig_queues = 1; 347897f1e196SWei Dai } else { 347961a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. 
diag=%d\n", 348061a3b0e5SAndrew Rybchenko pi, diag); 348197f1e196SWei Dai } 348297f1e196SWei Dai } 3483e9351eaaSQiming Yang } 348497f1e196SWei Dai 348597f1e196SWei Dai printf("Done\n"); 348697f1e196SWei Dai } 348797f1e196SWei Dai 348897f1e196SWei Dai void 3489edab33b1STetsuya Mukawa attach_port(char *identifier) 3490ce8d5614SIntel { 34914f1ed78eSThomas Monjalon portid_t pi; 3492c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3493ce8d5614SIntel 3494edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3495edab33b1STetsuya Mukawa 3496edab33b1STetsuya Mukawa if (identifier == NULL) { 349761a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3498edab33b1STetsuya Mukawa return; 3499ce8d5614SIntel } 3500ce8d5614SIntel 350175b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3502c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3503edab33b1STetsuya Mukawa return; 3504c9cce428SThomas Monjalon } 3505c9cce428SThomas Monjalon 35064f1ed78eSThomas Monjalon /* first attach mode: event */ 35074f1ed78eSThomas Monjalon if (setup_on_probe_event) { 35084f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 35094f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 35104f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 35114f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 35124f1ed78eSThomas Monjalon setup_attached_port(pi); 35134f1ed78eSThomas Monjalon return; 35144f1ed78eSThomas Monjalon } 35154f1ed78eSThomas Monjalon 35164f1ed78eSThomas Monjalon /* second attach mode: iterator */ 351786fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 35184f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 351986fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 352086fa5de1SThomas Monjalon continue; /* port was already attached before */ 3521c9cce428SThomas Monjalon setup_attached_port(pi); 3522c9cce428SThomas Monjalon } 352386fa5de1SThomas Monjalon } 3524c9cce428SThomas Monjalon 3525c9cce428SThomas Monjalon static void 3526c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3527c9cce428SThomas Monjalon { 3528c9cce428SThomas Monjalon unsigned int socket_id; 352934fc1051SIvan Ilchenko int ret; 3530edab33b1STetsuya Mukawa 3531931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 353229841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 3533931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 353429841336SPhil Yang socket_id = socket_ids[0]; 3535931126baSBernard Iremonger reconfig(pi, socket_id); 353634fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 353734fc1051SIvan Ilchenko if (ret != 0) 353861a3b0e5SAndrew Rybchenko fprintf(stderr, 353961a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 354034fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3541edab33b1STetsuya Mukawa 35424f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 35434f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 35444f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 35454f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3546edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3547edab33b1STetsuya Mukawa 3548edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 3549edab33b1STetsuya Mukawa printf("Done\n"); 3550edab33b1STetsuya Mukawa } 3551edab33b1STetsuya Mukawa 35520654d4a8SThomas Monjalon static void 35530654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 35545f4ec54fSChen Jing D(Mark) { 3555f8e5baa2SThomas Monjalon portid_t sibling; 3556f8e5baa2SThomas Monjalon 3557f8e5baa2SThomas Monjalon if (dev == NULL) { 355861a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3559f8e5baa2SThomas Monjalon return; 3560f8e5baa2SThomas Monjalon } 3561f8e5baa2SThomas Monjalon 35620654d4a8SThomas Monjalon printf("Removing a device...\n"); 3563938a184aSAdrien Mazarguil 35642a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 35652a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 35662a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 356761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 356861a3b0e5SAndrew Rybchenko sibling); 35692a449871SThomas Monjalon return; 35702a449871SThomas Monjalon } 35714b27989dSDmitry Kozlyuk flush_port_owned_resources(sibling); 35722a449871SThomas Monjalon } 35732a449871SThomas Monjalon } 35742a449871SThomas Monjalon 357575b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3576ec5ecd7eSDavid Marchand TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev)); 3577edab33b1STetsuya Mukawa return; 35783070419eSGaetan Rivet } 35794f1de450SThomas Monjalon remove_invalid_ports(); 358003ce2c53SMatan Azrad 35810654d4a8SThomas Monjalon printf("Device is detached\n"); 3582f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3583edab33b1STetsuya Mukawa printf("Done\n"); 3584edab33b1STetsuya Mukawa return; 35855f4ec54fSChen Jing D(Mark) } 35865f4ec54fSChen Jing D(Mark) 3587af75078fSIntel void 35880654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 35890654d4a8SThomas Monjalon { 35900a0821bcSPaulis Gributs int ret; 35910a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 35920a0821bcSPaulis Gributs 35930654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 35940654d4a8SThomas Monjalon return; 35950654d4a8SThomas Monjalon 35960654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 35970654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 359861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 35990654d4a8SThomas Monjalon return; 36000654d4a8SThomas Monjalon } 360161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 36020654d4a8SThomas Monjalon } 36030654d4a8SThomas Monjalon 36040a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 36050a0821bcSPaulis Gributs if (ret != 0) { 36060a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 36070a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 36080a0821bcSPaulis Gributs port_id); 36090a0821bcSPaulis Gributs return; 36100a0821bcSPaulis Gributs } 36110a0821bcSPaulis Gributs detach_device(dev_info.device); 36120654d4a8SThomas Monjalon } 36130654d4a8SThomas Monjalon 36140654d4a8SThomas Monjalon void 36155edee5f6SThomas Monjalon detach_devargs(char *identifier) 361655e51c96SNithin Dabilpuram { 361755e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 361855e51c96SNithin Dabilpuram struct rte_devargs da; 361955e51c96SNithin Dabilpuram portid_t port_id; 362055e51c96SNithin Dabilpuram 362155e51c96SNithin Dabilpuram printf("Removing a device...\n"); 362255e51c96SNithin Dabilpuram 
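/*
 * Illustrative usage (added; the identifiers are hypothetical): the
 * identifier is a devargs string naming a bus device, for example:
 *
 *   detach_devargs("0000:03:00.0");   PCI address
 *   detach_devargs("net_ring0");      virtual device name
 */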
362355e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da)); 362455e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 362561a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 362655e51c96SNithin Dabilpuram return; 362755e51c96SNithin Dabilpuram } 362855e51c96SNithin Dabilpuram 362955e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 363055e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 363155e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 363261a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 363361a3b0e5SAndrew Rybchenko port_id); 3634149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 363564051bb1SXueming Li rte_devargs_reset(&da); 363655e51c96SNithin Dabilpuram return; 363755e51c96SNithin Dabilpuram } 36384b27989dSDmitry Kozlyuk flush_port_owned_resources(port_id); 363955e51c96SNithin Dabilpuram } 364055e51c96SNithin Dabilpuram } 364155e51c96SNithin Dabilpuram 3642148c51a3SDavid Marchand if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) { 364355e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 3644148c51a3SDavid Marchand da.name, rte_bus_name(da.bus)); 364564051bb1SXueming Li rte_devargs_reset(&da); 364655e51c96SNithin Dabilpuram return; 364755e51c96SNithin Dabilpuram } 364855e51c96SNithin Dabilpuram 364955e51c96SNithin Dabilpuram remove_invalid_ports(); 365055e51c96SNithin Dabilpuram 365155e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 365255e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 365355e51c96SNithin Dabilpuram printf("Done\n"); 365464051bb1SXueming Li rte_devargs_reset(&da); 365555e51c96SNithin Dabilpuram } 365655e51c96SNithin Dabilpuram 365755e51c96SNithin Dabilpuram void 3658af75078fSIntel pmd_test_exit(void) 3659af75078fSIntel { 3660af75078fSIntel portid_t pt_id; 366126cbb419SViacheslav Ovsiienko unsigned int i; 3662fb73e096SJeff Guo int ret; 3663af75078fSIntel 36648210ec25SPablo de Lara if (test_done == 0) 36658210ec25SPablo de Lara stop_packet_forwarding(); 36668210ec25SPablo de Lara 3667761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 366826cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 36693a0968c8SShahaf Shuler if (mempools[i]) { 36703a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 36713a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 36723a0968c8SShahaf Shuler NULL); 36733a0968c8SShahaf Shuler } 36743a0968c8SShahaf Shuler } 3675761f7ae1SJie Zhou #endif 3676d3a274ceSZhihong Wang if (ports != NULL) { 3677d3a274ceSZhihong Wang no_link_check = 1; 36787d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 367908fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3680af75078fSIntel fflush(stdout); 3681d3a274ceSZhihong Wang stop_port(pt_id); 368208fd782bSCristian Dumitrescu } 368308fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 368408fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 368508fd782bSCristian Dumitrescu fflush(stdout); 3686d3a274ceSZhihong Wang close_port(pt_id); 3687af75078fSIntel } 3688d3a274ceSZhihong Wang } 3689fb73e096SJeff Guo 3690fb73e096SJeff Guo if (hot_plug) { 3691fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 36922049c511SJeff Guo if (ret) { 3693fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3694fb73e096SJeff Guo "failed to stop device event monitor.\n"); 36952049c511SJeff Guo return; 36962049c511SJeff Guo }
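/*
 * Tear down the rest of the hot-plug machinery set up in main():
 * unregister the device event callback, then disable hotplug
 * handling (the event monitor was already stopped above).
 */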
3697fb73e096SJeff Guo 36982049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3699cc1bf307SJeff Guo dev_event_callback, NULL); 37002049c511SJeff Guo if (ret < 0) { 3701fb73e096SJeff Guo RTE_LOG(ERR, EAL, 37022049c511SJeff Guo "failed to unregister device event callback.\n"); 37032049c511SJeff Guo return; 37042049c511SJeff Guo } 37052049c511SJeff Guo 37062049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 37072049c511SJeff Guo if (ret) { 37082049c511SJeff Guo RTE_LOG(ERR, EAL, 37092049c511SJeff Guo "failed to disable hotplug handling.\n"); 37102049c511SJeff Guo return; 37112049c511SJeff Guo } 3712fb73e096SJeff Guo } 371326cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3714401b744dSShahaf Shuler if (mempools[i]) 3715a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3716401b744dSShahaf Shuler } 371763b72657SIvan Ilchenko free(xstats_display); 3718fb73e096SJeff Guo 3719d3a274ceSZhihong Wang printf("\nBye...\n"); 3720af75078fSIntel } 3721af75078fSIntel 3722af75078fSIntel typedef void (*cmd_func_t)(void); 3723af75078fSIntel struct pmd_test_command { 3724af75078fSIntel const char *cmd_name; 3725af75078fSIntel cmd_func_t cmd_func; 3726af75078fSIntel }; 3727af75078fSIntel 3728ce8d5614SIntel /* Check the link status of all ports for up to 9 s (90 polls at 100 ms) and print the final status of each */ 3729af75078fSIntel static void 3730edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3731af75078fSIntel { 3732ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3733ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3734f8244c63SZhiyong Yang portid_t portid; 3735f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3736ce8d5614SIntel struct rte_eth_link link; 3737e661a08bSIgor Romanov int ret; 3738ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3739ce8d5614SIntel 3740ce8d5614SIntel printf("Checking link statuses...\n"); 3741ce8d5614SIntel fflush(stdout); 3742ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3743ce8d5614SIntel all_ports_up = 1; 37447d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3745ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3746ce8d5614SIntel continue; 3747ce8d5614SIntel memset(&link, 0, sizeof(link)); 3748e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3749e661a08bSIgor Romanov if (ret < 0) { 3750e661a08bSIgor Romanov all_ports_up = 0; 3751e661a08bSIgor Romanov if (print_flag == 1) 375261a3b0e5SAndrew Rybchenko fprintf(stderr, 375361a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3754e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3755e661a08bSIgor Romanov continue; 3756e661a08bSIgor Romanov } 3757ce8d5614SIntel /* print link status if flag set */ 3758ce8d5614SIntel if (print_flag == 1) { 3759ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3760ba5509a6SIvan Dyukov sizeof(link_status), &link); 3761ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3762ce8d5614SIntel continue; 3763ce8d5614SIntel } 3764ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3765295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3766ce8d5614SIntel all_ports_up = 0; 3767ce8d5614SIntel break; 3768ce8d5614SIntel } 3769ce8d5614SIntel } 3770ce8d5614SIntel /* after printing the final link status, get out */ 3771ce8d5614SIntel if (print_flag == 1) 3772ce8d5614SIntel break; 3773ce8d5614SIntel 3774ce8d5614SIntel if (all_ports_up == 0) { 3775ce8d5614SIntel fflush(stdout); 3776ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 
3777ce8d5614SIntel } 3778ce8d5614SIntel 3779ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3780ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3781ce8d5614SIntel print_flag = 1; 3782ce8d5614SIntel } 37838ea656f8SGaetan Rivet 37848ea656f8SGaetan Rivet if (lsc_interrupt) 37858ea656f8SGaetan Rivet break; 3786ce8d5614SIntel } 3787af75078fSIntel } 3788af75078fSIntel 3789284c908cSGaetan Rivet static void 3790cc1bf307SJeff Guo rmv_port_callback(void *arg) 3791284c908cSGaetan Rivet { 37923b97888aSMatan Azrad int need_to_start = 0; 37930da2a62bSMatan Azrad int org_no_link_check = no_link_check; 379428caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 37950a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 37960a0821bcSPaulis Gributs int ret; 3797284c908cSGaetan Rivet 3798284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3799284c908cSGaetan Rivet 38003b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 38013b97888aSMatan Azrad need_to_start = 1; 38023b97888aSMatan Azrad stop_packet_forwarding(); 38033b97888aSMatan Azrad } 38040da2a62bSMatan Azrad no_link_check = 1; 3805284c908cSGaetan Rivet stop_port(port_id); 38060da2a62bSMatan Azrad no_link_check = org_no_link_check; 38070654d4a8SThomas Monjalon 38080a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 38090a0821bcSPaulis Gributs if (ret != 0) 38100a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 38110a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 38120a0821bcSPaulis Gributs port_id); 3813e1d38504SPaulis Gributs else { 3814e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3815e1d38504SPaulis Gributs close_port(port_id); 3816e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3817e1d38504SPaulis Gributs } 38183b97888aSMatan Azrad if (need_to_start) 38193b97888aSMatan Azrad start_packet_forwarding(0); 3820284c908cSGaetan Rivet } 3821284c908cSGaetan Rivet 382276ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3823d6af1a13SBernard Iremonger static int 3824f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3825d6af1a13SBernard Iremonger void *ret_param) 382676ad4a2dSGaetan Rivet { 382776ad4a2dSGaetan Rivet RTE_SET_USED(param); 3828d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 382976ad4a2dSGaetan Rivet 383076ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 383161a3b0e5SAndrew Rybchenko fprintf(stderr, 383261a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 383376ad4a2dSGaetan Rivet port_id, __func__, type); 383476ad4a2dSGaetan Rivet fflush(stderr); 38353af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3836f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 383797b5d8b5SThomas Monjalon eth_event_desc[type]); 383876ad4a2dSGaetan Rivet fflush(stdout); 383976ad4a2dSGaetan Rivet } 3840284c908cSGaetan Rivet 3841284c908cSGaetan Rivet switch (type) { 38424f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 38434f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 38444f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 38454f1ed78eSThomas Monjalon break; 3846284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 38474f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 38484f1ed78eSThomas Monjalon break; 3849284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 
3850cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 385161a3b0e5SAndrew Rybchenko fprintf(stderr, 385261a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3853284c908cSGaetan Rivet break; 385485c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 385585c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 385685c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 385785c6571cSThomas Monjalon break; 3858bc70e559SSpike Du case RTE_ETH_EVENT_RX_AVAIL_THRESH: { 3859bc70e559SSpike Du uint16_t rxq_id; 3860bc70e559SSpike Du int ret; 3861bc70e559SSpike Du 3862bc70e559SSpike Du /* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */ 3863bc70e559SSpike Du for (rxq_id = 0; ; rxq_id++) { 3864bc70e559SSpike Du ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id, 3865bc70e559SSpike Du NULL); 3866bc70e559SSpike Du if (ret <= 0) 3867bc70e559SSpike Du break; 3868bc70e559SSpike Du printf("Received avail_thresh event, port: %u, rxq_id: %u\n", 3869bc70e559SSpike Du port_id, rxq_id); 3870f41a5092SSpike Du 3871f41a5092SSpike Du #ifdef RTE_NET_MLX5 3872f41a5092SSpike Du mlx5_test_avail_thresh_event_handler(port_id, rxq_id); 3873f41a5092SSpike Du #endif 3874bc70e559SSpike Du } 3875bc70e559SSpike Du break; 3876bc70e559SSpike Du } 3877284c908cSGaetan Rivet default: 3878284c908cSGaetan Rivet break; 3879284c908cSGaetan Rivet } 3880d6af1a13SBernard Iremonger return 0; 388176ad4a2dSGaetan Rivet } 388276ad4a2dSGaetan Rivet 388397b5d8b5SThomas Monjalon static int 388497b5d8b5SThomas Monjalon register_eth_event_callback(void) 388597b5d8b5SThomas Monjalon { 388697b5d8b5SThomas Monjalon int ret; 388797b5d8b5SThomas Monjalon enum rte_eth_event_type event; 388897b5d8b5SThomas Monjalon 388997b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 389097b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 389197b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 389297b5d8b5SThomas Monjalon event, 389397b5d8b5SThomas Monjalon eth_event_callback, 389497b5d8b5SThomas Monjalon NULL); 389597b5d8b5SThomas Monjalon if (ret != 0) { 389697b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 389797b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 389897b5d8b5SThomas Monjalon return -1; 389997b5d8b5SThomas Monjalon } 390097b5d8b5SThomas Monjalon } 390197b5d8b5SThomas Monjalon 390297b5d8b5SThomas Monjalon return 0; 390397b5d8b5SThomas Monjalon } 390497b5d8b5SThomas Monjalon 3905fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3906fb73e096SJeff Guo static void 3907cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3908fb73e096SJeff Guo __rte_unused void *arg) 3909fb73e096SJeff Guo { 39102049c511SJeff Guo uint16_t port_id; 39112049c511SJeff Guo int ret; 39122049c511SJeff Guo 3913fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3914fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3915fb73e096SJeff Guo __func__, type); 3916fb73e096SJeff Guo fflush(stderr); 3917fb73e096SJeff Guo } 3918fb73e096SJeff Guo 3919fb73e096SJeff Guo switch (type) { 3920fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3921cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3922fb73e096SJeff Guo device_name); 39232049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 39242049c511SJeff Guo if (ret) { 39252049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 39262049c511SJeff Guo 
device_name); 39272049c511SJeff Guo return; 39282049c511SJeff Guo } 3929cc1bf307SJeff Guo /* 3930cc1bf307SJeff Guo * Because the user's callback is invoked from the EAL interrupt 3931cc1bf307SJeff Guo * callback, the interrupt callback must be allowed to finish 3932cc1bf307SJeff Guo * before it can be unregistered when detaching the device. So 3933cc1bf307SJeff Guo * return from this callback quickly and detach the device via a 3934cc1bf307SJeff Guo * deferred removal instead. This is a workaround: once device 3935cc1bf307SJeff Guo * detaching is moved into the EAL in the future, the deferred 3936cc1bf307SJeff Guo * removal can be deleted. 3937cc1bf307SJeff Guo */ 3938cc1bf307SJeff Guo if (rte_eal_alarm_set(100000, 3939cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 3940cc1bf307SJeff Guo RTE_LOG(ERR, EAL, 3941cc1bf307SJeff Guo "Could not set up deferred device removal\n"); 3942fb73e096SJeff Guo break; 3943fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 3944fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 3945fb73e096SJeff Guo device_name); 3946fb73e096SJeff Guo /* TODO: after kernel driver binding finishes, 3947fb73e096SJeff Guo * begin to attach the port. 3948fb73e096SJeff Guo */ 3949fb73e096SJeff Guo break; 3950fb73e096SJeff Guo default: 3951fb73e096SJeff Guo break; 3952fb73e096SJeff Guo } 3953fb73e096SJeff Guo } 3954fb73e096SJeff Guo 3955f2c5125aSPablo de Lara static void 3956f4d178c1SXueming Li rxtx_port_config(portid_t pid) 3957f2c5125aSPablo de Lara { 3958d44f8a48SQi Zhang uint16_t qid; 39595e91aeefSWei Zhao uint64_t offloads; 3960f4d178c1SXueming Li struct rte_port *port = &ports[pid]; 3961f2c5125aSPablo de Lara 3962d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 39633c4426dbSDmitry Kozlyuk offloads = port->rxq[qid].conf.offloads; 39643c4426dbSDmitry Kozlyuk port->rxq[qid].conf = port->dev_info.default_rxconf; 3965f4d178c1SXueming Li 3966f4d178c1SXueming Li if (rxq_share > 0 && 3967f4d178c1SXueming Li (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) { 3968f4d178c1SXueming Li /* Non-zero share group to enable RxQ share. */ 39693c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_group = pid / rxq_share + 1; 39703c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_qid = qid; /* Equal mapping. 
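Each port in the share group maps its queue qid to the shared Rx queue with the same id.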
*/ 3971f4d178c1SXueming Li } 3972f4d178c1SXueming Li 3973575e0fd1SWei Zhao if (offloads != 0) 39743c4426dbSDmitry Kozlyuk port->rxq[qid].conf.offloads = offloads; 3975d44f8a48SQi Zhang 3976d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 3977f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 39783c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh; 3979f2c5125aSPablo de Lara 3980f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 39813c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh; 3982f2c5125aSPablo de Lara 3983f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 39843c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh; 3985f2c5125aSPablo de Lara 3986f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 39873c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_free_thresh = rx_free_thresh; 3988f2c5125aSPablo de Lara 3989f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 39903c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_drop_en = rx_drop_en; 3991f2c5125aSPablo de Lara 3992d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 3993d44f8a48SQi Zhang } 3994d44f8a48SQi Zhang 3995d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 39963c4426dbSDmitry Kozlyuk offloads = port->txq[qid].conf.offloads; 39973c4426dbSDmitry Kozlyuk port->txq[qid].conf = port->dev_info.default_txconf; 3998575e0fd1SWei Zhao if (offloads != 0) 39993c4426dbSDmitry Kozlyuk port->txq[qid].conf.offloads = offloads; 4000d44f8a48SQi Zhang 4001d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 4002f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 40033c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh; 4004f2c5125aSPablo de Lara 4005f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 40063c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh; 4007f2c5125aSPablo de Lara 4008f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 40093c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh; 4010f2c5125aSPablo de Lara 4011f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 40123c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh; 4013f2c5125aSPablo de Lara 4014f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 40153c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_free_thresh = tx_free_thresh; 4016d44f8a48SQi Zhang 4017d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 4018d44f8a48SQi Zhang } 4019f2c5125aSPablo de Lara } 4020f2c5125aSPablo de Lara 40210c4abd36SSteve Yang /* 4022b563c142SFerruh Yigit * Helper function to set MTU from frame size 40230c4abd36SSteve Yang * 40240c4abd36SSteve Yang * port->dev_info should be set before calling this function. 
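 * For example, with the typical 18 bytes of Ethernet overhead
 * (14-byte header plus 4-byte CRC) reported by get_eth_overhead(),
 * a max_rx_pktlen of 1518 yields an MTU of 1500 (editor's example).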
40250c4abd36SSteve Yang * 40260c4abd36SSteve Yang * return 0 on success, negative on error 40270c4abd36SSteve Yang */ 40280c4abd36SSteve Yang int 4029b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen) 40300c4abd36SSteve Yang { 40310c4abd36SSteve Yang struct rte_port *port = &ports[portid]; 40320c4abd36SSteve Yang uint32_t eth_overhead; 40331bb4a528SFerruh Yigit uint16_t mtu, new_mtu; 40340c4abd36SSteve Yang 40351bb4a528SFerruh Yigit eth_overhead = get_eth_overhead(&port->dev_info); 40361bb4a528SFerruh Yigit 40371bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(portid, &mtu) != 0) { 40381bb4a528SFerruh Yigit printf("Failed to get MTU for port %u\n", portid); 40391bb4a528SFerruh Yigit return -1; 40401bb4a528SFerruh Yigit } 40411bb4a528SFerruh Yigit 40421bb4a528SFerruh Yigit new_mtu = max_rx_pktlen - eth_overhead; 40430c4abd36SSteve Yang 40441bb4a528SFerruh Yigit if (mtu == new_mtu) 40451bb4a528SFerruh Yigit return 0; 40461bb4a528SFerruh Yigit 40471bb4a528SFerruh Yigit if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) { 404861a3b0e5SAndrew Rybchenko fprintf(stderr, 404961a3b0e5SAndrew Rybchenko "Failed to set MTU to %u for port %u\n", 40501bb4a528SFerruh Yigit new_mtu, portid); 40511bb4a528SFerruh Yigit return -1; 40520c4abd36SSteve Yang } 40530c4abd36SSteve Yang 40541bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = new_mtu; 40551bb4a528SFerruh Yigit 40560c4abd36SSteve Yang return 0; 40570c4abd36SSteve Yang } 40580c4abd36SSteve Yang 4059013af9b6SIntel void 4060013af9b6SIntel init_port_config(void) 4061013af9b6SIntel { 4062013af9b6SIntel portid_t pid; 4063013af9b6SIntel struct rte_port *port; 4064655eae01SJie Wang int ret, i; 4065013af9b6SIntel 40667d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 4067013af9b6SIntel port = &ports[pid]; 40686f51deb9SIvan Ilchenko 40696f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 40706f51deb9SIvan Ilchenko if (ret != 0) 40716f51deb9SIvan Ilchenko return; 40726f51deb9SIvan Ilchenko 40733ce690d3SBruce Richardson if (nb_rxq > 1) { 4074013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 407590892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 4076422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 4077af75078fSIntel } else { 4078013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 4079013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 4080af75078fSIntel } 40813ce690d3SBruce Richardson 40825f592039SJingjing Wu if (port->dcb_flag == 0) { 4083655eae01SJie Wang if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) { 4084f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 4085f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4086295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_RSS); 4087655eae01SJie Wang } else { 4088295968d1SFerruh Yigit port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; 4089655eae01SJie Wang port->dev_conf.rxmode.offloads &= 4090295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4091655eae01SJie Wang 4092655eae01SJie Wang for (i = 0; 4093655eae01SJie Wang i < port->dev_info.nb_rx_queues; 4094655eae01SJie Wang i++) 40953c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads &= 4096295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4097655eae01SJie Wang } 40983ce690d3SBruce Richardson } 40993ce690d3SBruce Richardson 4100f4d178c1SXueming Li rxtx_port_config(pid); 4101013af9b6SIntel 4102a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 4103a5279d25SIgor Romanov if (ret != 0) 4104a5279d25SIgor Romanov 
return; 4105013af9b6SIntel 41060a0821bcSPaulis Gributs if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC)) 41078ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 41080a0821bcSPaulis Gributs if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV)) 4109284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 4110013af9b6SIntel } 4111013af9b6SIntel } 4112013af9b6SIntel 411341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 411441b05095SBernard Iremonger { 411541b05095SBernard Iremonger struct rte_port *port; 411641b05095SBernard Iremonger 411741b05095SBernard Iremonger port = &ports[slave_pid]; 411841b05095SBernard Iremonger port->slave_flag = 1; 411941b05095SBernard Iremonger } 412041b05095SBernard Iremonger 412141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 412241b05095SBernard Iremonger { 412341b05095SBernard Iremonger struct rte_port *port; 412441b05095SBernard Iremonger 412541b05095SBernard Iremonger port = &ports[slave_pid]; 412641b05095SBernard Iremonger port->slave_flag = 0; 412741b05095SBernard Iremonger } 412841b05095SBernard Iremonger 41290e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 41300e545d30SBernard Iremonger { 41310e545d30SBernard Iremonger struct rte_port *port; 41320a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 41330a0821bcSPaulis Gributs int ret; 41340e545d30SBernard Iremonger 41350e545d30SBernard Iremonger port = &ports[slave_pid]; 41360a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(slave_pid, &dev_info); 41370a0821bcSPaulis Gributs if (ret != 0) { 41380a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 41390a0821bcSPaulis Gributs "Failed to get device info for port id %d, " 41400a0821bcSPaulis Gributs "cannot determine if the port is a bonded slave\n", 41410a0821bcSPaulis Gributs slave_pid); 41420a0821bcSPaulis Gributs return 0; 41430a0821bcSPaulis Gributs } 41440a0821bcSPaulis Gributs if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 4145b8b8b344SMatan Azrad return 1; 4146b8b8b344SMatan Azrad return 0; 41470e545d30SBernard Iremonger } 41480e545d30SBernard Iremonger 4149013af9b6SIntel const uint16_t vlan_tags[] = { 4150013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 4151013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 4152013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 4153013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 4154013af9b6SIntel }; 4155013af9b6SIntel 4156013af9b6SIntel static int 4157ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 41581a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 41591a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 41601a572499SJingjing Wu uint8_t pfc_en) 4161013af9b6SIntel { 4162013af9b6SIntel uint8_t i; 4163ac7c491cSKonstantin Ananyev int32_t rc; 4164ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 4165af75078fSIntel 4166af75078fSIntel /* 4167013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 4168013af9b6SIntel * given above, and the number of traffic classes available for use. 
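 * For example (editor's illustration), in VT mode with
 * num_tcs == RTE_ETH_4_TCS, 32 pools are used: VLAN tag vlan_tags[i]
 * maps to pool (i % 32) and user priority i maps to TC (i % 4).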
4169af75078fSIntel */ 41701a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 41711a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 41721a572499SJingjing Wu &eth_conf->rx_adv_conf.vmdq_dcb_conf; 41731a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 41741a572499SJingjing Wu &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf; 4175013af9b6SIntel 4176547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 41771a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 41781a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 41791a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 4180295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 41811a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 4182295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 4183013af9b6SIntel 41841a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 41851a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 41861a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 41871a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 41881a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 4189af75078fSIntel } 4190295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4191f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 4192f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 4193013af9b6SIntel } 4194013af9b6SIntel 4195013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 4196f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4197f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4198295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB); 4199295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 42001a572499SJingjing Wu } else { 42011a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 42021a572499SJingjing Wu &eth_conf->rx_adv_conf.dcb_rx_conf; 42031a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 42041a572499SJingjing Wu &eth_conf->tx_adv_conf.dcb_tx_conf; 4205013af9b6SIntel 42065139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 42075139bc12STing Xu 4208ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 4209ac7c491cSKonstantin Ananyev if (rc != 0) 4210ac7c491cSKonstantin Ananyev return rc; 4211ac7c491cSKonstantin Ananyev 42121a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 42131a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 42141a572499SJingjing Wu 4215295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4216bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 4217bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 4218013af9b6SIntel } 4219ac7c491cSKonstantin Ananyev 4220f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4221f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4222295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS); 4223ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 4224295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB; 42251a572499SJingjing Wu } 42261a572499SJingjing Wu 42271a572499SJingjing Wu if (pfc_en) 42281a572499SJingjing Wu eth_conf->dcb_capability_en = 4229295968d1SFerruh Yigit RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT; 4230013af9b6SIntel else 4231295968d1SFerruh Yigit eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT; 4232013af9b6SIntel 4233013af9b6SIntel return 0; 4234013af9b6SIntel } 4235013af9b6SIntel 4236013af9b6SIntel int 42371a572499SJingjing Wu 
init_port_dcb_config(portid_t pid, 42381a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 42391a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 42401a572499SJingjing Wu uint8_t pfc_en) 4241013af9b6SIntel { 4242013af9b6SIntel struct rte_eth_conf port_conf; 4243013af9b6SIntel struct rte_port *rte_port; 4244013af9b6SIntel int retval; 4245013af9b6SIntel uint16_t i; 4246013af9b6SIntel 4247a550baf2SMin Hu (Connor) if (num_procs > 1) { 4248a550baf2SMin Hu (Connor) printf("The multi-process feature doesn't support DCB.\n"); 4249a550baf2SMin Hu (Connor) return -ENOTSUP; 4250a550baf2SMin Hu (Connor) } 42512a977b89SWenzhuo Lu rte_port = &ports[pid]; 4252013af9b6SIntel 4253c1ba6c32SHuisong Li /* retain the original device configuration. */ 4254c1ba6c32SHuisong Li memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf)); 4255d5354e89SYanglong Wu 4256013af9b6SIntel /* set configuration of DCB in VT mode and DCB in non-VT mode */ 4257ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 4258013af9b6SIntel if (retval < 0) 4259013af9b6SIntel return retval; 4260295968d1SFerruh Yigit port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4261cbe70fdeSJie Wang /* remove RSS HASH offload for DCB in VT mode */ 4262cbe70fdeSJie Wang if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 4263cbe70fdeSJie Wang port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4264cbe70fdeSJie Wang for (i = 0; i < nb_rxq; i++) 42653c4426dbSDmitry Kozlyuk rte_port->rxq[i].conf.offloads &= 4266cbe70fdeSJie Wang ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4267cbe70fdeSJie Wang } 4268013af9b6SIntel 42692f203d44SQi Zhang /* re-configure the device. */ 42702b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 42712b0e0ebaSChenbo Xia if (retval < 0) 42722b0e0ebaSChenbo Xia return retval; 42736f51deb9SIvan Ilchenko 42746f51deb9SIvan Ilchenko retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); 42756f51deb9SIvan Ilchenko if (retval != 0) 42766f51deb9SIvan Ilchenko return retval; 42772a977b89SWenzhuo Lu 42782a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 42792a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 
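 * When that is the case, DCB in VT mode is rejected just below.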
42802a977b89SWenzhuo Lu */ 42812a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 42822a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 428361a3b0e5SAndrew Rybchenko fprintf(stderr, 428461a3b0e5SAndrew Rybchenko "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n", 428561a3b0e5SAndrew Rybchenko pid); 42862a977b89SWenzhuo Lu return -1; 42872a977b89SWenzhuo Lu } 42882a977b89SWenzhuo Lu 42892a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 42902a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 42912a977b89SWenzhuo Lu */ 42922a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 429386ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 429486ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 429586ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 429686ef65eeSBernard Iremonger } else { 42972a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 42982a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 429986ef65eeSBernard Iremonger } 43002a977b89SWenzhuo Lu } else { 43012a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 43022a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 43032a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 43042a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 43052a977b89SWenzhuo Lu } else { 43062a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 43072a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 43082a977b89SWenzhuo Lu 43092a977b89SWenzhuo Lu } 43102a977b89SWenzhuo Lu } 43112a977b89SWenzhuo Lu rx_free_thresh = 64; 43122a977b89SWenzhuo Lu 4313013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 4314013af9b6SIntel 4315f4d178c1SXueming Li rxtx_port_config(pid); 4316013af9b6SIntel /* VLAN filter */ 4317295968d1SFerruh Yigit rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 43181a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 4319013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 4320013af9b6SIntel 4321a5279d25SIgor Romanov retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); 4322a5279d25SIgor Romanov if (retval != 0) 4323a5279d25SIgor Romanov return retval; 4324a5279d25SIgor Romanov 43257741e4cfSIntel rte_port->dcb_flag = 1; 43267741e4cfSIntel 4327a690a070SHuisong Li /* Enter DCB configuration status */ 4328a690a070SHuisong Li dcb_config = 1; 4329a690a070SHuisong Li 4330013af9b6SIntel return 0; 4331af75078fSIntel } 4332af75078fSIntel 4333ffc468ffSTetsuya Mukawa static void 4334ffc468ffSTetsuya Mukawa init_port(void) 4335ffc468ffSTetsuya Mukawa { 43361b9f2746SGregory Etelson int i; 43371b9f2746SGregory Etelson 4338ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
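The ports[] array is allocated up front for all RTE_MAX_ETHPORTS
entries, so ports attached at runtime need no reallocation.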
*/ 4339ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 4340ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 4341ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 4342ffc468ffSTetsuya Mukawa if (ports == NULL) { 4343ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 4344ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 4345ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 4346ffc468ffSTetsuya Mukawa } 4347236bc417SGregory Etelson for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 4348236bc417SGregory Etelson ports[i].fwd_mac_swap = 1; 434963b72657SIvan Ilchenko ports[i].xstats_info.allocated = false; 43501b9f2746SGregory Etelson LIST_INIT(&ports[i].flow_tunnel_list); 4351236bc417SGregory Etelson } 435229841336SPhil Yang /* Initialize ports NUMA structures */ 435329841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 435429841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 435529841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 4356ffc468ffSTetsuya Mukawa } 4357ffc468ffSTetsuya Mukawa 4358d3a274ceSZhihong Wang static void 4359d3a274ceSZhihong Wang force_quit(void) 4360d3a274ceSZhihong Wang { 4361d3a274ceSZhihong Wang pmd_test_exit(); 4362d3a274ceSZhihong Wang prompt_exit(); 4363d3a274ceSZhihong Wang } 4364d3a274ceSZhihong Wang 4365d3a274ceSZhihong Wang static void 4366cfea1f30SPablo de Lara print_stats(void) 4367cfea1f30SPablo de Lara { 4368cfea1f30SPablo de Lara uint8_t i; 4369cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 4370cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 4371cfea1f30SPablo de Lara 4372cfea1f30SPablo de Lara /* Clear screen and move to top left */ 4373cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 4374cfea1f30SPablo de Lara 4375cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 4376cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 4377cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 4378683d1e82SIgor Romanov 4379683d1e82SIgor Romanov fflush(stdout); 4380cfea1f30SPablo de Lara } 4381cfea1f30SPablo de Lara 4382cfea1f30SPablo de Lara static void 4383d3a274ceSZhihong Wang signal_handler(int signum) 4384d3a274ceSZhihong Wang { 4385d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 438661a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSignal %d received, preparing to exit...\n", 4387d3a274ceSZhihong Wang signum); 4388a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 4389102b7329SReshma Pattan /* uninitialize packet capture framework */ 4390102b7329SReshma Pattan rte_pdump_uninit(); 4391102b7329SReshma Pattan #endif 4392a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 43938b36297dSAmit Gupta if (latencystats_enabled != 0) 439462d3216dSReshma Pattan rte_latencystats_uninit(); 439562d3216dSReshma Pattan #endif 4396d3a274ceSZhihong Wang force_quit(); 4397d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
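The non-interactive path in main() polls f_quit to leave its
stats-display loop.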
*/ 4398d9a191a0SPhil Yang f_quit = 1; 4399d3a274ceSZhihong Wang /* exit with the expected status */ 4400761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4401d3a274ceSZhihong Wang signal(signum, SIG_DFL); 4402d3a274ceSZhihong Wang kill(getpid(), signum); 4403761f7ae1SJie Zhou #endif 4404d3a274ceSZhihong Wang } 4405d3a274ceSZhihong Wang } 4406d3a274ceSZhihong Wang 4407af75078fSIntel int 4408af75078fSIntel main(int argc, char** argv) 4409af75078fSIntel { 4410af75078fSIntel int diag; 4411f8244c63SZhiyong Yang portid_t port_id; 44124918a357SXiaoyun Li uint16_t count; 4413fb73e096SJeff Guo int ret; 4414af75078fSIntel 4415d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 4416d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 4417d3a274ceSZhihong Wang 4418285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 4419285fd101SOlivier Matz if (testpmd_logtype < 0) 442016267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register log type"); 4421285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 4422285fd101SOlivier Matz 44239201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 44249201806eSStephen Hemminger if (diag < 0) 442516267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", 442616267ceeSStephen Hemminger rte_strerror(rte_errno)); 44279201806eSStephen Hemminger 442897b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 442997b5d8b5SThomas Monjalon if (ret != 0) 443016267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); 443197b5d8b5SThomas Monjalon 4432a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 44334aa0d012SAnatoly Burakov /* initialize packet capture framework */ 4434e9436f54STiwei Bie rte_pdump_init(); 44354aa0d012SAnatoly Burakov #endif 44364aa0d012SAnatoly Burakov 44374918a357SXiaoyun Li count = 0; 44384918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 44394918a357SXiaoyun Li ports_ids[count] = port_id; 44404918a357SXiaoyun Li count++; 44414918a357SXiaoyun Li } 44424918a357SXiaoyun Li nb_ports = (portid_t) count; 44434aa0d012SAnatoly Burakov if (nb_ports == 0) 44444aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 44454aa0d012SAnatoly Burakov 44464aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 44474aa0d012SAnatoly Burakov init_port(); 44484aa0d012SAnatoly Burakov 44494aa0d012SAnatoly Burakov set_def_fwd_config(); 44504aa0d012SAnatoly Burakov if (nb_lcores == 0) 445116267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" 445216267ceeSStephen Hemminger "Check the core mask argument\n"); 44534aa0d012SAnatoly Burakov 4454e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 4455a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4456e505d84cSAnatoly Burakov bitrate_enabled = 0; 4457e505d84cSAnatoly Burakov #endif 4458a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 4459e505d84cSAnatoly Burakov latencystats_enabled = 0; 4460e505d84cSAnatoly Burakov #endif 4461e505d84cSAnatoly Burakov 4462fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 44635fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 4464fb7b8b32SAnatoly Burakov do_mlockall = 0; 4465fb7b8b32SAnatoly Burakov #else 4466fb7b8b32SAnatoly Burakov do_mlockall = 1; 4467fb7b8b32SAnatoly Burakov #endif 4468fb7b8b32SAnatoly Burakov 4469e505d84cSAnatoly Burakov argc -= diag; 4470e505d84cSAnatoly Burakov argv += diag; 4471e505d84cSAnatoly Burakov if (argc > 1) 4472e505d84cSAnatoly Burakov 
launch_args_parse(argc, argv); 4473e505d84cSAnatoly Burakov 4474761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4475e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 4476285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 44771c036b16SEelco Chaudron strerror(errno)); 44781c036b16SEelco Chaudron } 4479761f7ae1SJie Zhou #endif 44801c036b16SEelco Chaudron 448199cabef0SPablo de Lara if (tx_first && interactive) 448299cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used in " 448399cabef0SPablo de Lara "interactive mode.\n"); 44848820cba4SDavid Hunt 44858820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 448661a3b0e5SAndrew Rybchenko fprintf(stderr, 448761a3b0e5SAndrew Rybchenko "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n"); 44888820cba4SDavid Hunt lsc_interrupt = 0; 44898820cba4SDavid Hunt } 44908820cba4SDavid Hunt 44915a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 449261a3b0e5SAndrew Rybchenko fprintf(stderr, 449361a3b0e5SAndrew Rybchenko "Warning: Either rx or tx queues should be non-zero\n"); 44945a8fb55cSReshma Pattan 44955a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 449661a3b0e5SAndrew Rybchenko fprintf(stderr, 449761a3b0e5SAndrew Rybchenko "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n", 4498af75078fSIntel nb_rxq, nb_txq); 4499af75078fSIntel 4500af75078fSIntel init_config(); 4501fb73e096SJeff Guo 4502fb73e096SJeff Guo if (hot_plug) { 45032049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 4504fb73e096SJeff Guo if (ret) { 45052049c511SJeff Guo RTE_LOG(ERR, EAL, 45062049c511SJeff Guo "failed to enable hotplug handling.\n"); 4507fb73e096SJeff Guo return -1; 4508fb73e096SJeff Guo } 4509fb73e096SJeff Guo 45102049c511SJeff Guo ret = rte_dev_event_monitor_start(); 45112049c511SJeff Guo if (ret) { 45122049c511SJeff Guo RTE_LOG(ERR, EAL, 45132049c511SJeff Guo "failed to start device event monitoring.\n"); 45142049c511SJeff Guo return -1; 45152049c511SJeff Guo } 45162049c511SJeff Guo 45172049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 4518cc1bf307SJeff Guo dev_event_callback, NULL); 45192049c511SJeff Guo if (ret) { 45202049c511SJeff Guo RTE_LOG(ERR, EAL, 45212049c511SJeff Guo "failed to register device event callback.\n"); 45222049c511SJeff Guo return -1; 45232049c511SJeff Guo } 4524fb73e096SJeff Guo } 4525fb73e096SJeff Guo 45267e403725SGregory Etelson if (!no_device_start && start_port(RTE_PORT_ALL) != 0) { 45277e403725SGregory Etelson if (!interactive) { 45287e403725SGregory Etelson rte_eal_cleanup(); 4529148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 45307e403725SGregory Etelson } 45317e403725SGregory Etelson fprintf(stderr, "Start ports failed\n"); 45327e403725SGregory Etelson } 4533af75078fSIntel 4534ce8d5614SIntel /* set all ports to promiscuous mode by default */ 453534fc1051SIvan Ilchenko RTE_ETH_FOREACH_DEV(port_id) { 453634fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(port_id); 453734fc1051SIvan Ilchenko if (ret != 0) 453861a3b0e5SAndrew Rybchenko fprintf(stderr, 453961a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 454034fc1051SIvan Ilchenko port_id, rte_strerror(-ret)); 454134fc1051SIvan Ilchenko } 4542af75078fSIntel 4543bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS 45447e4441c8SRemy Horton /* Init metrics library */ 45457e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 4546bb9be9a4SDavid Marchand #endif 45477e4441c8SRemy 
Horton 4548a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 454962d3216dSReshma Pattan if (latencystats_enabled != 0) { 455062d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 455162d3216dSReshma Pattan if (ret) 455261a3b0e5SAndrew Rybchenko fprintf(stderr, 455361a3b0e5SAndrew Rybchenko "Warning: latencystats init() returned error %d\n", 455461a3b0e5SAndrew Rybchenko ret); 455561a3b0e5SAndrew Rybchenko fprintf(stderr, "Latencystats running on lcore %d\n", 455662d3216dSReshma Pattan latencystats_lcore_id); 455762d3216dSReshma Pattan } 455862d3216dSReshma Pattan #endif 455962d3216dSReshma Pattan 45607e4441c8SRemy Horton /* Setup bitrate stats */ 4561a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4562e25e6c70SRemy Horton if (bitrate_enabled != 0) { 45637e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 45647e4441c8SRemy Horton if (bitrate_data == NULL) 4565e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 4566e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 45677e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 4568e25e6c70SRemy Horton } 45697e4441c8SRemy Horton #endif 457099a4974aSRobin Jarry 457199a4974aSRobin Jarry if (record_core_cycles) 457299a4974aSRobin Jarry rte_lcore_register_usage_cb(lcore_usage_callback); 457399a4974aSRobin Jarry 4574a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE 4575592ab76fSDavid Marchand if (init_cmdline() != 0) 4576592ab76fSDavid Marchand rte_exit(EXIT_FAILURE, 4577592ab76fSDavid Marchand "Could not initialise cmdline context.\n"); 4578592ab76fSDavid Marchand 457981ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 458081ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 458181ef862bSAllain Legacy 4582ca7feb22SCyril Chemparathy if (interactive == 1) { 4583ca7feb22SCyril Chemparathy if (auto_start) { 4584ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 4585ca7feb22SCyril Chemparathy start_packet_forwarding(0); 4586ca7feb22SCyril Chemparathy } 4587af75078fSIntel prompt(); 45880de738cfSJiayu Hu pmd_test_exit(); 4589ca7feb22SCyril Chemparathy } else 45900d56cb81SThomas Monjalon #endif 45910d56cb81SThomas Monjalon { 4592af75078fSIntel char c; 4593af75078fSIntel int rc; 4594af75078fSIntel 4595d9a191a0SPhil Yang f_quit = 0; 4596d9a191a0SPhil Yang 4597af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 459899cabef0SPablo de Lara start_packet_forwarding(tx_first); 4599cfea1f30SPablo de Lara if (stats_period != 0) { 4600cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 4601cfea1f30SPablo de Lara uint64_t timer_period; 4602cfea1f30SPablo de Lara 4603cfea1f30SPablo de Lara /* Convert to number of cycles */ 4604cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 4605cfea1f30SPablo de Lara 4606d9a191a0SPhil Yang while (f_quit == 0) { 4607cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 4608cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 4609cfea1f30SPablo de Lara 4610cfea1f30SPablo de Lara if (diff_time >= timer_period) { 4611cfea1f30SPablo de Lara print_stats(); 4612cfea1f30SPablo de Lara /* Reset the timer */ 4613cfea1f30SPablo de Lara diff_time = 0; 4614cfea1f30SPablo de Lara } 4615cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 4616cfea1f30SPablo de Lara prev_time = cur_time; 4617761f7ae1SJie Zhou rte_delay_us_sleep(US_PER_S); 4618cfea1f30SPablo de Lara } 4619cfea1f30SPablo de Lara } 4620cfea1f30SPablo de Lara 4621af75078fSIntel printf("Press enter to exit\n"); 
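/*
 * Editor's note (illustrative): this non-interactive branch runs when
 * testpmd is launched without the interactive prompt, e.g. (assuming
 * the usual EAL and testpmd flags):
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- --stats-period 1
 *
 * The loop above then prints port statistics every second until a
 * signal sets f_quit; here we block on stdin before tearing down.
 */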
4622af75078fSIntel rc = read(0, &c, 1); 4623d3a274ceSZhihong Wang pmd_test_exit(); 4624af75078fSIntel if (rc < 0) 4625af75078fSIntel return 1; 4626af75078fSIntel } 4627af75078fSIntel 46285e516c89SStephen Hemminger ret = rte_eal_cleanup(); 46295e516c89SStephen Hemminger if (ret != 0) 46305e516c89SStephen Hemminger rte_exit(EXIT_FAILURE, 46315e516c89SStephen Hemminger "EAL cleanup failed: %s\n", strerror(-ret)); 46325e516c89SStephen Hemminger 46335e516c89SStephen Hemminger return EXIT_SUCCESS; 4634af75078fSIntel }