1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2174a1631SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3af75078fSIntel */ 4af75078fSIntel 5af75078fSIntel #include <stdarg.h> 6af75078fSIntel #include <stdio.h> 7af75078fSIntel #include <stdlib.h> 8af75078fSIntel #include <signal.h> 9af75078fSIntel #include <string.h> 10af75078fSIntel #include <time.h> 11af75078fSIntel #include <fcntl.h> 12761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 131c036b16SEelco Chaudron #include <sys/mman.h> 140fd1386cSStephen Hemminger #include <sys/select.h> 15761f7ae1SJie Zhou #endif 16af75078fSIntel #include <sys/types.h> 17af75078fSIntel #include <errno.h> 18fb73e096SJeff Guo #include <stdbool.h> 19af75078fSIntel 20af75078fSIntel #include <sys/queue.h> 21af75078fSIntel #include <sys/stat.h> 22af75078fSIntel 23af75078fSIntel #include <stdint.h> 24af75078fSIntel #include <unistd.h> 25af75078fSIntel #include <inttypes.h> 26af75078fSIntel 27af75078fSIntel #include <rte_common.h> 28d1eb542eSOlivier Matz #include <rte_errno.h> 29af75078fSIntel #include <rte_byteorder.h> 30af75078fSIntel #include <rte_log.h> 31af75078fSIntel #include <rte_debug.h> 32af75078fSIntel #include <rte_cycles.h> 33af75078fSIntel #include <rte_memory.h> 34af75078fSIntel #include <rte_memcpy.h> 35af75078fSIntel #include <rte_launch.h> 36770ebc06SDavid Marchand #include <rte_bus.h> 37af75078fSIntel #include <rte_eal.h> 38284c908cSGaetan Rivet #include <rte_alarm.h> 39af75078fSIntel #include <rte_per_lcore.h> 40af75078fSIntel #include <rte_lcore.h> 41af75078fSIntel #include <rte_branch_prediction.h> 42af75078fSIntel #include <rte_mempool.h> 43af75078fSIntel #include <rte_malloc.h> 44af75078fSIntel #include <rte_mbuf.h> 450e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h> 46af75078fSIntel #include <rte_interrupts.h> 47af75078fSIntel #include <rte_ether.h> 48af75078fSIntel #include <rte_ethdev.h> 49edab33b1STetsuya Mukawa #include <rte_dev.h> 50af75078fSIntel #include <rte_string_fns.h> 51a8d0d473SBruce Richardson #ifdef RTE_NET_IXGBE 52e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h> 53e261265eSRadu Nicolau #endif 54a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 55102b7329SReshma Pattan #include <rte_pdump.h> 56102b7329SReshma Pattan #endif 57938a184aSAdrien Mazarguil #include <rte_flow.h> 58bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS 597e4441c8SRemy Horton #include <rte_metrics.h> 60bb9be9a4SDavid Marchand #endif 61a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 627e4441c8SRemy Horton #include <rte_bitrate.h> 637e4441c8SRemy Horton #endif 64a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 6562d3216dSReshma Pattan #include <rte_latencystats.h> 6662d3216dSReshma Pattan #endif 67761f7ae1SJie Zhou #ifdef RTE_EXEC_ENV_WINDOWS 68761f7ae1SJie Zhou #include <process.h> 69761f7ae1SJie Zhou #endif 70e46372d7SHuisong Li #ifdef RTE_NET_BOND 71e46372d7SHuisong Li #include <rte_eth_bond.h> 72e46372d7SHuisong Li #endif 73f41a5092SSpike Du #ifdef RTE_NET_MLX5 74f41a5092SSpike Du #include "mlx5_testpmd.h" 75f41a5092SSpike Du #endif 76af75078fSIntel 77af75078fSIntel #include "testpmd.h" 78af75078fSIntel 79c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB 80c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ 81c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000) 82c7f5dba7SAnatoly Burakov #else 83c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB 84c7f5dba7SAnatoly Burakov #endif 85c7f5dba7SAnatoly Burakov 86c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT 87c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */ 88c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26) 89c7f5dba7SAnatoly Burakov #else 90c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT 91c7f5dba7SAnatoly Burakov #endif 92c7f5dba7SAnatoly Burakov 93c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem" 9413b19642SDmitry Kozlyuk /* 9513b19642SDmitry Kozlyuk * Zone size with the malloc overhead (max of debug and release variants) 9613b19642SDmitry Kozlyuk * must fit into the smallest supported hugepage size (2M), 9713b19642SDmitry Kozlyuk * so that an IOVA-contiguous zone of this size can always be allocated 9813b19642SDmitry Kozlyuk * if there are free 2M hugepages. 9913b19642SDmitry Kozlyuk */ 10013b19642SDmitry Kozlyuk #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE) 101c7f5dba7SAnatoly Burakov 102af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 103285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */ 104af75078fSIntel 105cb056611SStephen Hemminger /* use main core for command line ? */ 106af75078fSIntel uint8_t interactive = 0; 107ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 10899cabef0SPablo de Lara uint8_t tx_first; 10981ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0}; 110af75078fSIntel 111af75078fSIntel /* 112af75078fSIntel * NUMA support configuration. 113af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 114af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 115af75078fSIntel * probed ports among the CPU sockets 0 and 1. 116af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 117af75078fSIntel */ 118999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */ 119af75078fSIntel 120af75078fSIntel /* 121b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 122b6ea6408SIntel * not configured. 123b6ea6408SIntel */ 124b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 125b6ea6408SIntel 126b6ea6408SIntel /* 127c7f5dba7SAnatoly Burakov * Select mempool allocation type: 128c7f5dba7SAnatoly Burakov * - native: use regular DPDK memory 129c7f5dba7SAnatoly Burakov * - anon: use regular DPDK memory to create mempool, but populate using 130c7f5dba7SAnatoly Burakov * anonymous memory (may not be IOVA-contiguous) 131c7f5dba7SAnatoly Burakov * - xmem: use externally allocated hugepage memory 132148f963fSBruce Richardson */ 133c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE; 134148f963fSBruce Richardson 135148f963fSBruce Richardson /* 13663531389SGeorgios Katsikas * Store specified sockets on which memory pool to be used by ports 13763531389SGeorgios Katsikas * is allocated. 13863531389SGeorgios Katsikas */ 13963531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS]; 14063531389SGeorgios Katsikas 14163531389SGeorgios Katsikas /* 14263531389SGeorgios Katsikas * Store specified sockets on which RX ring to be used by ports 14363531389SGeorgios Katsikas * is allocated. 14463531389SGeorgios Katsikas */ 14563531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 14663531389SGeorgios Katsikas 14763531389SGeorgios Katsikas /* 14863531389SGeorgios Katsikas * Store specified sockets on which TX ring to be used by ports 14963531389SGeorgios Katsikas * is allocated. 15063531389SGeorgios Katsikas */ 15163531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS]; 15263531389SGeorgios Katsikas 15363531389SGeorgios Katsikas /* 154af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 155af75078fSIntel * forwarded. 156547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator 157af75078fSIntel * ports. 158af75078fSIntel */ 1596d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 160af75078fSIntel portid_t nb_peer_eth_addrs = 0; 161af75078fSIntel 162af75078fSIntel /* 163af75078fSIntel * Probed Target Environment. 164af75078fSIntel */ 165af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 166af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 167af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 168af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 169af75078fSIntel 1704918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ 1714918a357SXiaoyun Li 172af75078fSIntel /* 173af75078fSIntel * Test Forwarding Configuration. 174af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 175af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 176af75078fSIntel */ 177af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 178af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 179af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 180af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 181af75078fSIntel 182af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 183af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 184af75078fSIntel 185af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ 186af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 187af75078fSIntel 188af75078fSIntel /* 189af75078fSIntel * Forwarding engines. 190af75078fSIntel */ 191af75078fSIntel struct fwd_engine * fwd_engines[] = { 192af75078fSIntel &io_fwd_engine, 193af75078fSIntel &mac_fwd_engine, 194d47388f1SCyril Chemparathy &mac_swap_engine, 195e9e23a61SCyril Chemparathy &flow_gen_engine, 196af75078fSIntel &rx_only_engine, 197af75078fSIntel &tx_only_engine, 198af75078fSIntel &csum_fwd_engine, 199168dfa61SIvan Boule &icmp_echo_engine, 2003c156061SJens Freimann &noisy_vnf_engine, 2012564abdaSShiri Kuzin &five_tuple_swap_fwd_engine, 202af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 203af75078fSIntel &ieee1588_fwd_engine, 204af75078fSIntel #endif 20559840375SXueming Li &shared_rxq_engine, 206af75078fSIntel NULL, 207af75078fSIntel }; 208af75078fSIntel 20926cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT]; 21059fcf854SShahaf Shuler uint16_t mempool_flags; 211401b744dSShahaf Shuler 212af75078fSIntel struct fwd_config cur_fwd_config; 213af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 214bf56fce1SZhihong Wang uint32_t retry_enabled; 215bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 216bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 217af75078fSIntel 21826cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */ 21926cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = { 22026cbb419SViacheslav Ovsiienko DEFAULT_MBUF_DATA_SIZE 22126cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */ 222c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 223c8798818SIntel * specified on command-line. */ 224cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 225d9a191a0SPhil Yang 22663b72657SIvan Ilchenko /** Extended statistics to show. */ 22763b72657SIvan Ilchenko struct rte_eth_xstat_name *xstats_display; 22863b72657SIvan Ilchenko 22963b72657SIvan Ilchenko unsigned int xstats_display_num; /**< Size of extended statistics to show */ 23063b72657SIvan Ilchenko 231d9a191a0SPhil Yang /* 232d9a191a0SPhil Yang * In container, it cannot terminate the process which running with 'stats-period' 233d9a191a0SPhil Yang * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. 234d9a191a0SPhil Yang */ 2350fd1386cSStephen Hemminger volatile uint8_t f_quit; 2363889a322SHuisong Li uint8_t cl_quit; /* Quit testpmd from cmdline. */ 237d9a191a0SPhil Yang 238af75078fSIntel /* 2391bb4a528SFerruh Yigit * Max Rx frame size, set by '--max-pkt-len' parameter. 2401bb4a528SFerruh Yigit */ 2411bb4a528SFerruh Yigit uint32_t max_rx_pkt_len; 2421bb4a528SFerruh Yigit 2431bb4a528SFerruh Yigit /* 2440f2096d7SViacheslav Ovsiienko * Configuration of packet segments used to scatter received packets 2450f2096d7SViacheslav Ovsiienko * if some of split features is configured. 2460f2096d7SViacheslav Ovsiienko */ 2470f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT]; 2480f2096d7SViacheslav Ovsiienko uint8_t rx_pkt_nb_segs; /**< Number of segments to split */ 24991c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT]; 25091c78e09SViacheslav Ovsiienko uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */ 25152e2e7edSYuan Wang uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT]; 2520f2096d7SViacheslav Ovsiienko 253a4bf5421SHanumanth Pothula uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */ 254a4bf5421SHanumanth Pothula 2550f2096d7SViacheslav Ovsiienko /* 256af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 257af75078fSIntel */ 258af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 259af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 260af75078fSIntel TXONLY_DEF_PACKET_LEN, 261af75078fSIntel }; 262af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 263af75078fSIntel 26479bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 26579bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 26679bec05bSKonstantin Ananyev 26782010ef5SYongseok Koh uint8_t txonly_multi_flow; 26882010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */ 26982010ef5SYongseok Koh 2704940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter; 2714940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */ 2724940344dSViacheslav Ovsiienko 2734940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra; 2744940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */ 2754940344dSViacheslav Ovsiienko 276af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 2776c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */ 278861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */ 279e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 280af75078fSIntel 281900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 282900550deSIntel uint8_t dcb_config = 0; 283900550deSIntel 284af75078fSIntel /* 285af75078fSIntel * Configurable number of RX/TX queues. 286af75078fSIntel */ 2871c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */ 288af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 289af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 290af75078fSIntel 291af75078fSIntel /* 292af75078fSIntel * Configurable number of RX/TX ring descriptors. 2938599ed31SRemy Horton * Defaults are supplied by drivers via ethdev. 294af75078fSIntel */ 2954ed89049SDavid Marchand #define RX_DESC_DEFAULT 0 2964ed89049SDavid Marchand #define TX_DESC_DEFAULT 0 2974ed89049SDavid Marchand uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 2984ed89049SDavid Marchand uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 299af75078fSIntel 300f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1 301af75078fSIntel /* 302af75078fSIntel * Configurable values of RX and TX ring threshold registers. 303af75078fSIntel */ 304af75078fSIntel 305f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 306f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 307f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 308af75078fSIntel 309f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 310f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 311f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 312af75078fSIntel 313af75078fSIntel /* 314af75078fSIntel * Configurable value of RX free threshold. 315af75078fSIntel */ 316f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 317af75078fSIntel 318af75078fSIntel /* 319ce8d5614SIntel * Configurable value of RX drop enable. 320ce8d5614SIntel */ 321f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 322ce8d5614SIntel 323ce8d5614SIntel /* 324af75078fSIntel * Configurable value of TX free threshold. 325af75078fSIntel */ 326f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 327af75078fSIntel 328af75078fSIntel /* 329af75078fSIntel * Configurable value of TX RS bit threshold. 330af75078fSIntel */ 331f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 332af75078fSIntel 333af75078fSIntel /* 3341d343c19SMike Pattrick * Configurable sub-forwarding mode for the noisy_vnf forwarding mode. 3351d343c19SMike Pattrick */ 3361d343c19SMike Pattrick enum noisy_fwd_mode noisy_fwd_mode; 3371d343c19SMike Pattrick 3381d343c19SMike Pattrick /* String version of enum noisy_fwd_mode */ 3391d343c19SMike Pattrick const char * const noisy_fwd_mode_desc[] = { 3401d343c19SMike Pattrick [NOISY_FWD_MODE_IO] = "io", 3411d343c19SMike Pattrick [NOISY_FWD_MODE_MAC] = "mac", 3421d343c19SMike Pattrick [NOISY_FWD_MODE_MACSWAP] = "macswap", 3431d343c19SMike Pattrick [NOISY_FWD_MODE_5TSWAP] = "5tswap", 3441d343c19SMike Pattrick [NOISY_FWD_MODE_MAX] = NULL, 3451d343c19SMike Pattrick }; 3461d343c19SMike Pattrick 3471d343c19SMike Pattrick /* 3483c156061SJens Freimann * Configurable value of buffered packets before sending. 3493c156061SJens Freimann */ 3503c156061SJens Freimann uint16_t noisy_tx_sw_bufsz; 3513c156061SJens Freimann 3523c156061SJens Freimann /* 3533c156061SJens Freimann * Configurable value of packet buffer timeout. 3543c156061SJens Freimann */ 3553c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time; 3563c156061SJens Freimann 3573c156061SJens Freimann /* 3583c156061SJens Freimann * Configurable value for size of VNF internal memory area 3593c156061SJens Freimann * used for simulating noisy neighbour behaviour 3603c156061SJens Freimann */ 3613c156061SJens Freimann uint64_t noisy_lkup_mem_sz; 3623c156061SJens Freimann 3633c156061SJens Freimann /* 3643c156061SJens Freimann * Configurable value of number of random writes done in 3653c156061SJens Freimann * VNF simulation memory area. 3663c156061SJens Freimann */ 3673c156061SJens Freimann uint64_t noisy_lkup_num_writes; 3683c156061SJens Freimann 3693c156061SJens Freimann /* 3703c156061SJens Freimann * Configurable value of number of random reads done in 3713c156061SJens Freimann * VNF simulation memory area. 3723c156061SJens Freimann */ 3733c156061SJens Freimann uint64_t noisy_lkup_num_reads; 3743c156061SJens Freimann 3753c156061SJens Freimann /* 3763c156061SJens Freimann * Configurable value of number of random reads/writes done in 3773c156061SJens Freimann * VNF simulation memory area. 3783c156061SJens Freimann */ 3793c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes; 3803c156061SJens Freimann 3813c156061SJens Freimann /* 382af75078fSIntel * Receive Side Scaling (RSS) configuration. 383af75078fSIntel */ 384295968d1SFerruh Yigit uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */ 385af75078fSIntel 386af75078fSIntel /* 387af75078fSIntel * Port topology configuration 388af75078fSIntel */ 389af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 390af75078fSIntel 3917741e4cfSIntel /* 3927741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 3937741e4cfSIntel */ 3947741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 3957741e4cfSIntel 396af75078fSIntel /* 3977ee3e944SVasily Philipov * Flow API isolated mode. 3987ee3e944SVasily Philipov */ 3997ee3e944SVasily Philipov uint8_t flow_isolate_all; 4007ee3e944SVasily Philipov 4017ee3e944SVasily Philipov /* 402543df472SChengwen Feng * Disable port flow flush when stop port. 403543df472SChengwen Feng */ 404543df472SChengwen Feng uint8_t no_flow_flush = 0; /* do flow flush by default */ 405543df472SChengwen Feng 406543df472SChengwen Feng /* 407bc202406SDavid Marchand * Avoids to check link status when starting/stopping a port. 408bc202406SDavid Marchand */ 409bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 410bc202406SDavid Marchand 411bc202406SDavid Marchand /* 4126937d210SStephen Hemminger * Don't automatically start all ports in interactive mode. 4136937d210SStephen Hemminger */ 4146937d210SStephen Hemminger uint8_t no_device_start = 0; 4156937d210SStephen Hemminger 4166937d210SStephen Hemminger /* 4178ea656f8SGaetan Rivet * Enable link status change notification 4188ea656f8SGaetan Rivet */ 4198ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */ 4208ea656f8SGaetan Rivet 4218ea656f8SGaetan Rivet /* 422284c908cSGaetan Rivet * Enable device removal notification. 423284c908cSGaetan Rivet */ 424284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */ 425284c908cSGaetan Rivet 426fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */ 427fb73e096SJeff Guo 4284f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */ 4294f1ed78eSThomas Monjalon bool setup_on_probe_event = true; 4304f1ed78eSThomas Monjalon 431b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */ 432b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true; 433b0a9354aSPavan Nikhilesh 43401817b10SBing Zhao /* Hairpin ports configuration mode. */ 43523095155SDariusz Sosnowski uint32_t hairpin_mode; 43601817b10SBing Zhao 43797b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */ 43897b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = { 43997b5d8b5SThomas Monjalon [RTE_ETH_EVENT_UNKNOWN] = "unknown", 44097b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_LSC] = "link state change", 44197b5d8b5SThomas Monjalon [RTE_ETH_EVENT_QUEUE_STATE] = "queue state", 44297b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RESET] = "reset", 44397b5d8b5SThomas Monjalon [RTE_ETH_EVENT_VF_MBOX] = "VF mbox", 44497b5d8b5SThomas Monjalon [RTE_ETH_EVENT_IPSEC] = "IPsec", 44597b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MACSEC] = "MACsec", 44697b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RMV] = "device removal", 44797b5d8b5SThomas Monjalon [RTE_ETH_EVENT_NEW] = "device probed", 44897b5d8b5SThomas Monjalon [RTE_ETH_EVENT_DESTROY] = "device released", 4490e459ffaSDong Zhou [RTE_ETH_EVENT_FLOW_AGED] = "flow aged", 450bc70e559SSpike Du [RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached", 451eb0d471aSKalesh AP [RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering", 452eb0d471aSKalesh AP [RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful", 453eb0d471aSKalesh AP [RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed", 45497b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MAX] = NULL, 45597b5d8b5SThomas Monjalon }; 45697b5d8b5SThomas Monjalon 457284c908cSGaetan Rivet /* 4583af72783SGaetan Rivet * Display or mask ether events 4593af72783SGaetan Rivet * Default to all events except VF_MBOX 4603af72783SGaetan Rivet */ 4613af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 4623af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 4633af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 4643af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 465badb87c1SAnoob Joseph (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | 4663af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 4670e459ffaSDong Zhou (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) | 468eb0d471aSKalesh AP (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) | 469eb0d471aSKalesh AP (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) | 470eb0d471aSKalesh AP (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) | 471eb0d471aSKalesh AP (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED); 472e505d84cSAnatoly Burakov /* 473e505d84cSAnatoly Burakov * Decide if all memory are locked for performance. 474e505d84cSAnatoly Burakov */ 475e505d84cSAnatoly Burakov int do_mlockall = 0; 4763af72783SGaetan Rivet 477a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 47862d3216dSReshma Pattan 47962d3216dSReshma Pattan /* 48062d3216dSReshma Pattan * Set when latency stats is enabled in the commandline 48162d3216dSReshma Pattan */ 48262d3216dSReshma Pattan uint8_t latencystats_enabled; 48362d3216dSReshma Pattan 48462d3216dSReshma Pattan /* 4857be78d02SJosh Soref * Lcore ID to service latency statistics. 48662d3216dSReshma Pattan */ 48762d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1; 48862d3216dSReshma Pattan 48962d3216dSReshma Pattan #endif 49062d3216dSReshma Pattan 4917b7e5ba7SIntel /* 492af75078fSIntel * Ethernet device configuration. 493af75078fSIntel */ 4941bb4a528SFerruh Yigit struct rte_eth_rxmode rx_mode; 495af75078fSIntel 49607e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = { 497295968d1SFerruh Yigit .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, 49807e5f7bdSShahaf Shuler }; 499fd8c20aaSShahaf Shuler 5002950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */ 501af75078fSIntel 502a4fd5eeeSElza Mathew /* 503a4fd5eeeSElza Mathew * Display zero values by default for xstats 504a4fd5eeeSElza Mathew */ 505a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 506a4fd5eeeSElza Mathew 507bc700b67SDharmik Thakkar /* 508bc700b67SDharmik Thakkar * Measure of CPU cycles disabled by default 509bc700b67SDharmik Thakkar */ 510bc700b67SDharmik Thakkar uint8_t record_core_cycles; 511bc700b67SDharmik Thakkar 5120e4b1963SDharmik Thakkar /* 5130e4b1963SDharmik Thakkar * Display of RX and TX bursts disabled by default 5140e4b1963SDharmik Thakkar */ 5150e4b1963SDharmik Thakkar uint8_t record_burst_stats; 5160e4b1963SDharmik Thakkar 517f4d178c1SXueming Li /* 518f4d178c1SXueming Li * Number of ports per shared Rx queue group, 0 disable. 519f4d178c1SXueming Li */ 520f4d178c1SXueming Li uint32_t rxq_share; 521f4d178c1SXueming Li 522c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 523c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 5247acf894dSStephen Hurd 525a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 5267e4441c8SRemy Horton /* Bitrate statistics */ 5277e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 528e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 529e25e6c70SRemy Horton uint8_t bitrate_enabled; 530e25e6c70SRemy Horton #endif 5317e4441c8SRemy Horton 5326970401eSDavid Marchand #ifdef RTE_LIB_GRO 533b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 534b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 5356970401eSDavid Marchand #endif 536b40f8d78SJiayu Hu 537f9295aa2SXiaoyu Min /* 538f9295aa2SXiaoyu Min * hexadecimal bitmask of RX mq mode can be enabled. 539f9295aa2SXiaoyu Min */ 540295968d1SFerruh Yigit enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS; 541f9295aa2SXiaoyu Min 542b7b78a08SAjit Khaparde /* 543b7b78a08SAjit Khaparde * Used to set forced link speed 544b7b78a08SAjit Khaparde */ 545b7b78a08SAjit Khaparde uint32_t eth_link_speed; 546b7b78a08SAjit Khaparde 547a550baf2SMin Hu (Connor) /* 548a550baf2SMin Hu (Connor) * ID of the current process in multi-process, used to 549a550baf2SMin Hu (Connor) * configure the queues to be polled. 550a550baf2SMin Hu (Connor) */ 551a550baf2SMin Hu (Connor) int proc_id; 552a550baf2SMin Hu (Connor) 553a550baf2SMin Hu (Connor) /* 554a550baf2SMin Hu (Connor) * Number of processes in multi-process, used to 555a550baf2SMin Hu (Connor) * configure the queues to be polled. 556a550baf2SMin Hu (Connor) */ 557a550baf2SMin Hu (Connor) unsigned int num_procs = 1; 558a550baf2SMin Hu (Connor) 559f6d8a6d3SIvan Malov static void 560f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(uint16_t port_id) 561f6d8a6d3SIvan Malov { 562f6d8a6d3SIvan Malov uint64_t rx_meta_features = 0; 563f6d8a6d3SIvan Malov int ret; 564f6d8a6d3SIvan Malov 565f6d8a6d3SIvan Malov if (!is_proc_primary()) 566f6d8a6d3SIvan Malov return; 567f6d8a6d3SIvan Malov 568f6d8a6d3SIvan Malov rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG; 569f6d8a6d3SIvan Malov rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK; 570f6d8a6d3SIvan Malov rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID; 571f6d8a6d3SIvan Malov 572f6d8a6d3SIvan Malov ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features); 573f6d8a6d3SIvan Malov if (ret == 0) { 574f6d8a6d3SIvan Malov if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) { 575f6d8a6d3SIvan Malov TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n", 576f6d8a6d3SIvan Malov port_id); 577f6d8a6d3SIvan Malov } 578f6d8a6d3SIvan Malov 579f6d8a6d3SIvan Malov if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) { 580f6d8a6d3SIvan Malov TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n", 581f6d8a6d3SIvan Malov port_id); 582f6d8a6d3SIvan Malov } 583f6d8a6d3SIvan Malov 584f6d8a6d3SIvan Malov if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) { 585f6d8a6d3SIvan Malov TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n", 586f6d8a6d3SIvan Malov port_id); 587f6d8a6d3SIvan Malov } 588f6d8a6d3SIvan Malov } else if (ret != -ENOTSUP) { 589f6d8a6d3SIvan Malov rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n", 590f6d8a6d3SIvan Malov port_id, rte_strerror(-ret)); 591f6d8a6d3SIvan Malov } 592f6d8a6d3SIvan Malov } 593f6d8a6d3SIvan Malov 594a550baf2SMin Hu (Connor) static int 595a550baf2SMin Hu (Connor) eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 596a550baf2SMin Hu (Connor) const struct rte_eth_conf *dev_conf) 597a550baf2SMin Hu (Connor) { 598a550baf2SMin Hu (Connor) if (is_proc_primary()) 599a550baf2SMin Hu (Connor) return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, 600a550baf2SMin Hu (Connor) dev_conf); 601a550baf2SMin Hu (Connor) return 0; 602a550baf2SMin Hu (Connor) } 603a550baf2SMin Hu (Connor) 604a550baf2SMin Hu (Connor) static int 605e46372d7SHuisong Li change_bonding_slave_port_status(portid_t bond_pid, bool is_stop) 606e46372d7SHuisong Li { 607e46372d7SHuisong Li #ifdef RTE_NET_BOND 608e46372d7SHuisong Li 609e46372d7SHuisong Li portid_t slave_pids[RTE_MAX_ETHPORTS]; 610e46372d7SHuisong Li struct rte_port *port; 611e46372d7SHuisong Li int num_slaves; 612e46372d7SHuisong Li portid_t slave_pid; 613e46372d7SHuisong Li int i; 614e46372d7SHuisong Li 615e46372d7SHuisong Li num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids, 616e46372d7SHuisong Li RTE_MAX_ETHPORTS); 617e46372d7SHuisong Li if (num_slaves < 0) { 618e46372d7SHuisong Li fprintf(stderr, "Failed to get slave list for port = %u\n", 619e46372d7SHuisong Li bond_pid); 620e46372d7SHuisong Li return num_slaves; 621e46372d7SHuisong Li } 622e46372d7SHuisong Li 623e46372d7SHuisong Li for (i = 0; i < num_slaves; i++) { 624e46372d7SHuisong Li slave_pid = slave_pids[i]; 625e46372d7SHuisong Li port = &ports[slave_pid]; 626e46372d7SHuisong Li port->port_status = 627e46372d7SHuisong Li is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED; 628e46372d7SHuisong Li } 629e46372d7SHuisong Li #else 630e46372d7SHuisong Li RTE_SET_USED(bond_pid); 631e46372d7SHuisong Li RTE_SET_USED(is_stop); 632e46372d7SHuisong Li #endif 633e46372d7SHuisong Li return 0; 634e46372d7SHuisong Li } 635e46372d7SHuisong Li 636e46372d7SHuisong Li static int 637a550baf2SMin Hu (Connor) eth_dev_start_mp(uint16_t port_id) 638a550baf2SMin Hu (Connor) { 639e46372d7SHuisong Li int ret; 640e46372d7SHuisong Li 641e46372d7SHuisong Li if (is_proc_primary()) { 642e46372d7SHuisong Li ret = rte_eth_dev_start(port_id); 643e46372d7SHuisong Li if (ret != 0) 644e46372d7SHuisong Li return ret; 645e46372d7SHuisong Li 646e46372d7SHuisong Li struct rte_port *port = &ports[port_id]; 647e46372d7SHuisong Li 648e46372d7SHuisong Li /* 649e46372d7SHuisong Li * Starting a bonded port also starts all slaves under the bonded 650e46372d7SHuisong Li * device. So if this port is bond device, we need to modify the 651e46372d7SHuisong Li * port status of these slaves. 652e46372d7SHuisong Li */ 653e46372d7SHuisong Li if (port->bond_flag == 1) 654e46372d7SHuisong Li return change_bonding_slave_port_status(port_id, false); 655e46372d7SHuisong Li } 656a550baf2SMin Hu (Connor) 657a550baf2SMin Hu (Connor) return 0; 658a550baf2SMin Hu (Connor) } 659a550baf2SMin Hu (Connor) 660a550baf2SMin Hu (Connor) static int 661a550baf2SMin Hu (Connor) eth_dev_stop_mp(uint16_t port_id) 662a550baf2SMin Hu (Connor) { 663e46372d7SHuisong Li int ret; 664e46372d7SHuisong Li 665e46372d7SHuisong Li if (is_proc_primary()) { 666e46372d7SHuisong Li ret = rte_eth_dev_stop(port_id); 667e46372d7SHuisong Li if (ret != 0) 668e46372d7SHuisong Li return ret; 669e46372d7SHuisong Li 670e46372d7SHuisong Li struct rte_port *port = &ports[port_id]; 671e46372d7SHuisong Li 672e46372d7SHuisong Li /* 673e46372d7SHuisong Li * Stopping a bonded port also stops all slaves under the bonded 674e46372d7SHuisong Li * device. So if this port is bond device, we need to modify the 675e46372d7SHuisong Li * port status of these slaves. 676e46372d7SHuisong Li */ 677e46372d7SHuisong Li if (port->bond_flag == 1) 678e46372d7SHuisong Li return change_bonding_slave_port_status(port_id, true); 679e46372d7SHuisong Li } 680a550baf2SMin Hu (Connor) 681a550baf2SMin Hu (Connor) return 0; 682a550baf2SMin Hu (Connor) } 683a550baf2SMin Hu (Connor) 684a550baf2SMin Hu (Connor) static void 685a550baf2SMin Hu (Connor) mempool_free_mp(struct rte_mempool *mp) 686a550baf2SMin Hu (Connor) { 687a550baf2SMin Hu (Connor) if (is_proc_primary()) 688a550baf2SMin Hu (Connor) rte_mempool_free(mp); 689a550baf2SMin Hu (Connor) } 690a550baf2SMin Hu (Connor) 691a550baf2SMin Hu (Connor) static int 692a550baf2SMin Hu (Connor) eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu) 693a550baf2SMin Hu (Connor) { 694a550baf2SMin Hu (Connor) if (is_proc_primary()) 695a550baf2SMin Hu (Connor) return rte_eth_dev_set_mtu(port_id, mtu); 696a550baf2SMin Hu (Connor) 697a550baf2SMin Hu (Connor) return 0; 698a550baf2SMin Hu (Connor) } 699a550baf2SMin Hu (Connor) 700ed30d9b6SIntel /* Forward function declarations */ 701c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi); 702edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 703f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 70476ad4a2dSGaetan Rivet enum rte_eth_event_type type, 705d6af1a13SBernard Iremonger void *param, void *ret_param); 706cc1bf307SJeff Guo static void dev_event_callback(const char *device_name, 707fb73e096SJeff Guo enum rte_dev_event_type type, 708fb73e096SJeff Guo void *param); 70963b72657SIvan Ilchenko static void fill_xstats_display_info(void); 710ce8d5614SIntel 711ce8d5614SIntel /* 712ce8d5614SIntel * Check if all the ports are started. 713ce8d5614SIntel * If yes, return positive value. If not, return zero. 714ce8d5614SIntel */ 715ce8d5614SIntel static int all_ports_started(void); 716ed30d9b6SIntel 7176970401eSDavid Marchand #ifdef RTE_LIB_GSO 71852f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 71935b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; 7206970401eSDavid Marchand #endif 72152f38a20SJiayu Hu 722b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */ 723b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE]; 724b57b66a9SOri Kam 72563b72657SIvan Ilchenko 726af75078fSIntel /* 72798a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 728c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 729c9cafcc8SShahaf Shuler */ 730c9cafcc8SShahaf Shuler int 731c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 732c9cafcc8SShahaf Shuler { 733c9cafcc8SShahaf Shuler unsigned int i; 734c9cafcc8SShahaf Shuler 735c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 736c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 737c9cafcc8SShahaf Shuler return 0; 738c9cafcc8SShahaf Shuler } 739c9cafcc8SShahaf Shuler return 1; 740c9cafcc8SShahaf Shuler } 741c9cafcc8SShahaf Shuler 742c9cafcc8SShahaf Shuler /* 743af75078fSIntel * Setup default configuration. 744af75078fSIntel */ 745af75078fSIntel static void 746af75078fSIntel set_default_fwd_lcores_config(void) 747af75078fSIntel { 748af75078fSIntel unsigned int i; 749af75078fSIntel unsigned int nb_lc; 7507acf894dSStephen Hurd unsigned int sock_num; 751af75078fSIntel 752af75078fSIntel nb_lc = 0; 753af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 754dbfb8ec7SPhil Yang if (!rte_lcore_is_enabled(i)) 755dbfb8ec7SPhil Yang continue; 756c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 757c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 758c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 759c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 760c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 761c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 762c9cafcc8SShahaf Shuler } 763c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 7647acf894dSStephen Hurd } 765cb056611SStephen Hemminger if (i == rte_get_main_lcore()) 766f54fe5eeSStephen Hurd continue; 767f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 768af75078fSIntel } 769af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 770af75078fSIntel nb_cfg_lcores = nb_lcores; 771af75078fSIntel nb_fwd_lcores = 1; 772af75078fSIntel } 773af75078fSIntel 774af75078fSIntel static void 775af75078fSIntel set_def_peer_eth_addrs(void) 776af75078fSIntel { 777af75078fSIntel portid_t i; 778af75078fSIntel 779af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 78035b2d13fSOlivier Matz peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR; 781af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 782af75078fSIntel } 783af75078fSIntel } 784af75078fSIntel 785af75078fSIntel static void 786af75078fSIntel set_default_fwd_ports_config(void) 787af75078fSIntel { 788af75078fSIntel portid_t pt_id; 78965a7360cSMatan Azrad int i = 0; 790af75078fSIntel 791effdb8bbSPhil Yang RTE_ETH_FOREACH_DEV(pt_id) { 79265a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 793af75078fSIntel 794effdb8bbSPhil Yang /* Update sockets info according to the attached device */ 795effdb8bbSPhil Yang int socket_id = rte_eth_dev_socket_id(pt_id); 796effdb8bbSPhil Yang if (socket_id >= 0 && new_socket_id(socket_id)) { 797effdb8bbSPhil Yang if (num_sockets >= RTE_MAX_NUMA_NODES) { 798effdb8bbSPhil Yang rte_exit(EXIT_FAILURE, 799effdb8bbSPhil Yang "Total sockets greater than %u\n", 800effdb8bbSPhil Yang RTE_MAX_NUMA_NODES); 801effdb8bbSPhil Yang } 802effdb8bbSPhil Yang socket_ids[num_sockets++] = socket_id; 803effdb8bbSPhil Yang } 804effdb8bbSPhil Yang } 805effdb8bbSPhil Yang 806af75078fSIntel nb_cfg_ports = nb_ports; 807af75078fSIntel nb_fwd_ports = nb_ports; 808af75078fSIntel } 809af75078fSIntel 810af75078fSIntel void 811af75078fSIntel set_def_fwd_config(void) 812af75078fSIntel { 813af75078fSIntel set_default_fwd_lcores_config(); 814af75078fSIntel set_def_peer_eth_addrs(); 815af75078fSIntel set_default_fwd_ports_config(); 816af75078fSIntel } 817af75078fSIntel 818761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 819c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */ 820c7f5dba7SAnatoly Burakov static int 821c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) 822c7f5dba7SAnatoly Burakov { 823c7f5dba7SAnatoly Burakov unsigned int n_pages, mbuf_per_pg, leftover; 824c7f5dba7SAnatoly Burakov uint64_t total_mem, mbuf_mem, obj_sz; 825c7f5dba7SAnatoly Burakov 826c7f5dba7SAnatoly Burakov /* there is no good way to predict how much space the mempool will 827c7f5dba7SAnatoly Burakov * occupy because it will allocate chunks on the fly, and some of those 828c7f5dba7SAnatoly Burakov * will come from default DPDK memory while some will come from our 829c7f5dba7SAnatoly Burakov * external memory, so just assume 128MB will be enough for everyone. 830c7f5dba7SAnatoly Burakov */ 831c7f5dba7SAnatoly Burakov uint64_t hdr_mem = 128 << 20; 832c7f5dba7SAnatoly Burakov 833c7f5dba7SAnatoly Burakov /* account for possible non-contiguousness */ 834c7f5dba7SAnatoly Burakov obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); 835c7f5dba7SAnatoly Burakov if (obj_sz > pgsz) { 836c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); 837c7f5dba7SAnatoly Burakov return -1; 838c7f5dba7SAnatoly Burakov } 839c7f5dba7SAnatoly Burakov 840c7f5dba7SAnatoly Burakov mbuf_per_pg = pgsz / obj_sz; 841c7f5dba7SAnatoly Burakov leftover = (nb_mbufs % mbuf_per_pg) > 0; 842c7f5dba7SAnatoly Burakov n_pages = (nb_mbufs / mbuf_per_pg) + leftover; 843c7f5dba7SAnatoly Burakov 844c7f5dba7SAnatoly Burakov mbuf_mem = n_pages * pgsz; 845c7f5dba7SAnatoly Burakov 846c7f5dba7SAnatoly Burakov total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); 847c7f5dba7SAnatoly Burakov 848c7f5dba7SAnatoly Burakov if (total_mem > SIZE_MAX) { 849c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Memory size too big\n"); 850c7f5dba7SAnatoly Burakov return -1; 851c7f5dba7SAnatoly Burakov } 852c7f5dba7SAnatoly Burakov *out = (size_t)total_mem; 853c7f5dba7SAnatoly Burakov 854c7f5dba7SAnatoly Burakov return 0; 855c7f5dba7SAnatoly Burakov } 856c7f5dba7SAnatoly Burakov 857c7f5dba7SAnatoly Burakov static int 858c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz) 859c7f5dba7SAnatoly Burakov { 860c7f5dba7SAnatoly Burakov /* as per mmap() manpage, all page sizes are log2 of page size 861c7f5dba7SAnatoly Burakov * shifted by MAP_HUGE_SHIFT 862c7f5dba7SAnatoly Burakov */ 8639d650537SAnatoly Burakov int log2 = rte_log2_u64(page_sz); 864c7f5dba7SAnatoly Burakov 865c7f5dba7SAnatoly Burakov return (log2 << HUGE_SHIFT); 866c7f5dba7SAnatoly Burakov } 867c7f5dba7SAnatoly Burakov 868c7f5dba7SAnatoly Burakov static void * 869c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge) 870c7f5dba7SAnatoly Burakov { 871c7f5dba7SAnatoly Burakov void *addr; 872c7f5dba7SAnatoly Burakov int flags; 873c7f5dba7SAnatoly Burakov 874c7f5dba7SAnatoly Burakov /* allocate anonymous hugepages */ 875c7f5dba7SAnatoly Burakov flags = MAP_ANONYMOUS | MAP_PRIVATE; 876c7f5dba7SAnatoly Burakov if (huge) 877c7f5dba7SAnatoly Burakov flags |= HUGE_FLAG | pagesz_flags(pgsz); 878c7f5dba7SAnatoly Burakov 879c7f5dba7SAnatoly Burakov addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); 880c7f5dba7SAnatoly Burakov if (addr == MAP_FAILED) 881c7f5dba7SAnatoly Burakov return NULL; 882c7f5dba7SAnatoly Burakov 883c7f5dba7SAnatoly Burakov return addr; 884c7f5dba7SAnatoly Burakov } 885c7f5dba7SAnatoly Burakov 886c7f5dba7SAnatoly Burakov struct extmem_param { 887c7f5dba7SAnatoly Burakov void *addr; 888c7f5dba7SAnatoly Burakov size_t len; 889c7f5dba7SAnatoly Burakov size_t pgsz; 890c7f5dba7SAnatoly Burakov rte_iova_t *iova_table; 891c7f5dba7SAnatoly Burakov unsigned int iova_table_len; 892c7f5dba7SAnatoly Burakov }; 893c7f5dba7SAnatoly Burakov 894c7f5dba7SAnatoly Burakov static int 895c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, 896c7f5dba7SAnatoly Burakov bool huge) 897c7f5dba7SAnatoly Burakov { 898c7f5dba7SAnatoly Burakov uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ 899c7f5dba7SAnatoly Burakov RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ 900c7f5dba7SAnatoly Burakov unsigned int cur_page, n_pages, pgsz_idx; 901c7f5dba7SAnatoly Burakov size_t mem_sz, cur_pgsz; 902c7f5dba7SAnatoly Burakov rte_iova_t *iovas = NULL; 903c7f5dba7SAnatoly Burakov void *addr; 904c7f5dba7SAnatoly Burakov int ret; 905c7f5dba7SAnatoly Burakov 906c7f5dba7SAnatoly Burakov for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { 907c7f5dba7SAnatoly Burakov /* skip anything that is too big */ 908c7f5dba7SAnatoly Burakov if (pgsizes[pgsz_idx] > SIZE_MAX) 909c7f5dba7SAnatoly Burakov continue; 910c7f5dba7SAnatoly Burakov 911c7f5dba7SAnatoly Burakov cur_pgsz = pgsizes[pgsz_idx]; 912c7f5dba7SAnatoly Burakov 913c7f5dba7SAnatoly Burakov /* if we were told not to allocate hugepages, override */ 914c7f5dba7SAnatoly Burakov if (!huge) 915c7f5dba7SAnatoly Burakov cur_pgsz = sysconf(_SC_PAGESIZE); 916c7f5dba7SAnatoly Burakov 917c7f5dba7SAnatoly Burakov ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); 918c7f5dba7SAnatoly Burakov if (ret < 0) { 919c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); 920c7f5dba7SAnatoly Burakov return -1; 921c7f5dba7SAnatoly Burakov } 922c7f5dba7SAnatoly Burakov 923c7f5dba7SAnatoly Burakov /* allocate our memory */ 924c7f5dba7SAnatoly Burakov addr = alloc_mem(mem_sz, cur_pgsz, huge); 925c7f5dba7SAnatoly Burakov 926c7f5dba7SAnatoly Burakov /* if we couldn't allocate memory with a specified page size, 927c7f5dba7SAnatoly Burakov * that doesn't mean we can't do it with other page sizes, so 928c7f5dba7SAnatoly Burakov * try another one. 929c7f5dba7SAnatoly Burakov */ 930c7f5dba7SAnatoly Burakov if (addr == NULL) 931c7f5dba7SAnatoly Burakov continue; 932c7f5dba7SAnatoly Burakov 933c7f5dba7SAnatoly Burakov /* store IOVA addresses for every page in this memory area */ 934c7f5dba7SAnatoly Burakov n_pages = mem_sz / cur_pgsz; 935c7f5dba7SAnatoly Burakov 936c7f5dba7SAnatoly Burakov iovas = malloc(sizeof(*iovas) * n_pages); 937c7f5dba7SAnatoly Burakov 938c7f5dba7SAnatoly Burakov if (iovas == NULL) { 939c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n"); 940c7f5dba7SAnatoly Burakov goto fail; 941c7f5dba7SAnatoly Burakov } 942c7f5dba7SAnatoly Burakov /* lock memory if it's not huge pages */ 943c7f5dba7SAnatoly Burakov if (!huge) 944c7f5dba7SAnatoly Burakov mlock(addr, mem_sz); 945c7f5dba7SAnatoly Burakov 946c7f5dba7SAnatoly Burakov /* populate IOVA addresses */ 947c7f5dba7SAnatoly Burakov for (cur_page = 0; cur_page < n_pages; cur_page++) { 948c7f5dba7SAnatoly Burakov rte_iova_t iova; 949c7f5dba7SAnatoly Burakov size_t offset; 950c7f5dba7SAnatoly Burakov void *cur; 951c7f5dba7SAnatoly Burakov 952c7f5dba7SAnatoly Burakov offset = cur_pgsz * cur_page; 953c7f5dba7SAnatoly Burakov cur = RTE_PTR_ADD(addr, offset); 954c7f5dba7SAnatoly Burakov 955c7f5dba7SAnatoly Burakov /* touch the page before getting its IOVA */ 956c7f5dba7SAnatoly Burakov *(volatile char *)cur = 0; 957c7f5dba7SAnatoly Burakov 958c7f5dba7SAnatoly Burakov iova = rte_mem_virt2iova(cur); 959c7f5dba7SAnatoly Burakov 960c7f5dba7SAnatoly Burakov iovas[cur_page] = iova; 961c7f5dba7SAnatoly Burakov } 962c7f5dba7SAnatoly Burakov 963c7f5dba7SAnatoly Burakov break; 964c7f5dba7SAnatoly Burakov } 965c7f5dba7SAnatoly Burakov /* if we couldn't allocate anything */ 966c7f5dba7SAnatoly Burakov if (iovas == NULL) 967c7f5dba7SAnatoly Burakov return -1; 968c7f5dba7SAnatoly Burakov 969c7f5dba7SAnatoly Burakov param->addr = addr; 970c7f5dba7SAnatoly Burakov param->len = mem_sz; 971c7f5dba7SAnatoly Burakov param->pgsz = cur_pgsz; 972c7f5dba7SAnatoly Burakov param->iova_table = iovas; 973c7f5dba7SAnatoly Burakov param->iova_table_len = n_pages; 974c7f5dba7SAnatoly Burakov 975c7f5dba7SAnatoly Burakov return 0; 976c7f5dba7SAnatoly Burakov fail: 977c7f5dba7SAnatoly Burakov free(iovas); 978c7f5dba7SAnatoly Burakov if (addr) 979c7f5dba7SAnatoly Burakov munmap(addr, mem_sz); 980c7f5dba7SAnatoly Burakov 981c7f5dba7SAnatoly Burakov return -1; 982c7f5dba7SAnatoly Burakov } 983c7f5dba7SAnatoly Burakov 984c7f5dba7SAnatoly Burakov static int 985c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge) 986c7f5dba7SAnatoly Burakov { 987c7f5dba7SAnatoly Burakov struct extmem_param param; 988c7f5dba7SAnatoly Burakov int socket_id, ret; 989c7f5dba7SAnatoly Burakov 990c7f5dba7SAnatoly Burakov memset(¶m, 0, sizeof(param)); 991c7f5dba7SAnatoly Burakov 992c7f5dba7SAnatoly Burakov /* check if our heap exists */ 993c7f5dba7SAnatoly Burakov socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 994c7f5dba7SAnatoly Burakov if (socket_id < 0) { 995c7f5dba7SAnatoly Burakov /* create our heap */ 996c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME); 997c7f5dba7SAnatoly Burakov if (ret < 0) { 998c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create heap\n"); 999c7f5dba7SAnatoly Burakov return -1; 1000c7f5dba7SAnatoly Burakov } 1001c7f5dba7SAnatoly Burakov } 1002c7f5dba7SAnatoly Burakov 1003c7f5dba7SAnatoly Burakov ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge); 1004c7f5dba7SAnatoly Burakov if (ret < 0) { 1005c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot create memory area\n"); 1006c7f5dba7SAnatoly Burakov return -1; 1007c7f5dba7SAnatoly Burakov } 1008c7f5dba7SAnatoly Burakov 1009c7f5dba7SAnatoly Burakov /* we now have a valid memory area, so add it to heap */ 1010c7f5dba7SAnatoly Burakov ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME, 1011c7f5dba7SAnatoly Burakov param.addr, param.len, param.iova_table, 1012c7f5dba7SAnatoly Burakov param.iova_table_len, param.pgsz); 1013c7f5dba7SAnatoly Burakov 1014c7f5dba7SAnatoly Burakov /* when using VFIO, memory is automatically mapped for DMA by EAL */ 1015c7f5dba7SAnatoly Burakov 1016c7f5dba7SAnatoly Burakov /* not needed any more */ 1017c7f5dba7SAnatoly Burakov free(param.iova_table); 1018c7f5dba7SAnatoly Burakov 1019c7f5dba7SAnatoly Burakov if (ret < 0) { 1020c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Cannot add memory to heap\n"); 1021c7f5dba7SAnatoly Burakov munmap(param.addr, param.len); 1022c7f5dba7SAnatoly Burakov return -1; 1023c7f5dba7SAnatoly Burakov } 1024c7f5dba7SAnatoly Burakov 1025c7f5dba7SAnatoly Burakov /* success */ 1026c7f5dba7SAnatoly Burakov 1027c7f5dba7SAnatoly Burakov TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n", 1028c7f5dba7SAnatoly Burakov param.len >> 20); 1029c7f5dba7SAnatoly Burakov 1030c7f5dba7SAnatoly Burakov return 0; 1031c7f5dba7SAnatoly Burakov } 10323a0968c8SShahaf Shuler static void 10333a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, 10343a0968c8SShahaf Shuler struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) 10353a0968c8SShahaf Shuler { 10363a0968c8SShahaf Shuler uint16_t pid = 0; 10373a0968c8SShahaf Shuler int ret; 10383a0968c8SShahaf Shuler 10393a0968c8SShahaf Shuler RTE_ETH_FOREACH_DEV(pid) { 10400a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 10413a0968c8SShahaf Shuler 10420a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(pid, &dev_info); 10430a0821bcSPaulis Gributs if (ret != 0) { 10440a0821bcSPaulis Gributs TESTPMD_LOG(DEBUG, 10450a0821bcSPaulis Gributs "unable to get device info for port %d on addr 0x%p," 10460a0821bcSPaulis Gributs "mempool unmapping will not be performed\n", 10470a0821bcSPaulis Gributs pid, memhdr->addr); 10480a0821bcSPaulis Gributs continue; 10490a0821bcSPaulis Gributs } 10500a0821bcSPaulis Gributs 10510a0821bcSPaulis Gributs ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len); 10523a0968c8SShahaf Shuler if (ret) { 10533a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 10543a0968c8SShahaf Shuler "unable to DMA unmap addr 0x%p " 10553a0968c8SShahaf Shuler "for device %s\n", 1056ec5ecd7eSDavid Marchand memhdr->addr, rte_dev_name(dev_info.device)); 10573a0968c8SShahaf Shuler } 10583a0968c8SShahaf Shuler } 10593a0968c8SShahaf Shuler ret = rte_extmem_unregister(memhdr->addr, memhdr->len); 10603a0968c8SShahaf Shuler if (ret) { 10613a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 10623a0968c8SShahaf Shuler "unable to un-register addr 0x%p\n", memhdr->addr); 10633a0968c8SShahaf Shuler } 10643a0968c8SShahaf Shuler } 10653a0968c8SShahaf Shuler 10663a0968c8SShahaf Shuler static void 10673a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, 10683a0968c8SShahaf Shuler struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) 10693a0968c8SShahaf Shuler { 10703a0968c8SShahaf Shuler uint16_t pid = 0; 10713a0968c8SShahaf Shuler size_t page_size = sysconf(_SC_PAGESIZE); 10723a0968c8SShahaf Shuler int ret; 10733a0968c8SShahaf Shuler 10743a0968c8SShahaf Shuler ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0, 10753a0968c8SShahaf Shuler page_size); 10763a0968c8SShahaf Shuler if (ret) { 10773a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 10783a0968c8SShahaf Shuler "unable to register addr 0x%p\n", memhdr->addr); 10793a0968c8SShahaf Shuler return; 10803a0968c8SShahaf Shuler } 10813a0968c8SShahaf Shuler RTE_ETH_FOREACH_DEV(pid) { 10820a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 10833a0968c8SShahaf Shuler 10840a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(pid, &dev_info); 10850a0821bcSPaulis Gributs if (ret != 0) { 10860a0821bcSPaulis Gributs TESTPMD_LOG(DEBUG, 10870a0821bcSPaulis Gributs "unable to get device info for port %d on addr 0x%p," 10880a0821bcSPaulis Gributs "mempool mapping will not be performed\n", 10890a0821bcSPaulis Gributs pid, memhdr->addr); 10900a0821bcSPaulis Gributs continue; 10910a0821bcSPaulis Gributs } 10920a0821bcSPaulis Gributs ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len); 10933a0968c8SShahaf Shuler if (ret) { 10943a0968c8SShahaf Shuler TESTPMD_LOG(DEBUG, 10953a0968c8SShahaf Shuler "unable to DMA map addr 0x%p " 10963a0968c8SShahaf Shuler "for device %s\n", 1097ec5ecd7eSDavid Marchand memhdr->addr, rte_dev_name(dev_info.device)); 10983a0968c8SShahaf Shuler } 10993a0968c8SShahaf Shuler } 11003a0968c8SShahaf Shuler } 1101761f7ae1SJie Zhou #endif 1102c7f5dba7SAnatoly Burakov 110372512e18SViacheslav Ovsiienko static unsigned int 110472512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id, 110572512e18SViacheslav Ovsiienko char *pool_name, struct rte_pktmbuf_extmem **ext_mem) 110672512e18SViacheslav Ovsiienko { 110772512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *xmem; 110872512e18SViacheslav Ovsiienko unsigned int ext_num, zone_num, elt_num; 110972512e18SViacheslav Ovsiienko uint16_t elt_size; 111072512e18SViacheslav Ovsiienko 111172512e18SViacheslav Ovsiienko elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE); 111272512e18SViacheslav Ovsiienko elt_num = EXTBUF_ZONE_SIZE / elt_size; 111372512e18SViacheslav Ovsiienko zone_num = (nb_mbufs + elt_num - 1) / elt_num; 111472512e18SViacheslav Ovsiienko 111572512e18SViacheslav Ovsiienko xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num); 111672512e18SViacheslav Ovsiienko if (xmem == NULL) { 111772512e18SViacheslav Ovsiienko TESTPMD_LOG(ERR, "Cannot allocate memory for " 111872512e18SViacheslav Ovsiienko "external buffer descriptors\n"); 111972512e18SViacheslav Ovsiienko *ext_mem = NULL; 112072512e18SViacheslav Ovsiienko return 0; 112172512e18SViacheslav Ovsiienko } 112272512e18SViacheslav Ovsiienko for (ext_num = 0; ext_num < zone_num; ext_num++) { 112372512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *xseg = xmem + ext_num; 112472512e18SViacheslav Ovsiienko const struct rte_memzone *mz; 112572512e18SViacheslav Ovsiienko char mz_name[RTE_MEMZONE_NAMESIZE]; 112672512e18SViacheslav Ovsiienko int ret; 112772512e18SViacheslav Ovsiienko 112872512e18SViacheslav Ovsiienko ret = snprintf(mz_name, sizeof(mz_name), 112972512e18SViacheslav Ovsiienko RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num); 113072512e18SViacheslav Ovsiienko if (ret < 0 || ret >= (int)sizeof(mz_name)) { 113172512e18SViacheslav Ovsiienko errno = ENAMETOOLONG; 113272512e18SViacheslav Ovsiienko ext_num = 0; 113372512e18SViacheslav Ovsiienko break; 113472512e18SViacheslav Ovsiienko } 113513b19642SDmitry Kozlyuk mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE, 113672512e18SViacheslav Ovsiienko socket_id, 113772512e18SViacheslav Ovsiienko RTE_MEMZONE_IOVA_CONTIG | 113872512e18SViacheslav Ovsiienko RTE_MEMZONE_1GB | 113913b19642SDmitry Kozlyuk RTE_MEMZONE_SIZE_HINT_ONLY); 114072512e18SViacheslav Ovsiienko if (mz == NULL) { 114172512e18SViacheslav Ovsiienko /* 114272512e18SViacheslav Ovsiienko * The caller exits on external buffer creation 114372512e18SViacheslav Ovsiienko * error, so there is no need to free memzones. 114472512e18SViacheslav Ovsiienko */ 114572512e18SViacheslav Ovsiienko errno = ENOMEM; 114672512e18SViacheslav Ovsiienko ext_num = 0; 114772512e18SViacheslav Ovsiienko break; 114872512e18SViacheslav Ovsiienko } 114972512e18SViacheslav Ovsiienko xseg->buf_ptr = mz->addr; 115072512e18SViacheslav Ovsiienko xseg->buf_iova = mz->iova; 115172512e18SViacheslav Ovsiienko xseg->buf_len = EXTBUF_ZONE_SIZE; 115272512e18SViacheslav Ovsiienko xseg->elt_size = elt_size; 115372512e18SViacheslav Ovsiienko } 115472512e18SViacheslav Ovsiienko if (ext_num == 0 && xmem != NULL) { 115572512e18SViacheslav Ovsiienko free(xmem); 115672512e18SViacheslav Ovsiienko xmem = NULL; 115772512e18SViacheslav Ovsiienko } 115872512e18SViacheslav Ovsiienko *ext_mem = xmem; 115972512e18SViacheslav Ovsiienko return ext_num; 116072512e18SViacheslav Ovsiienko } 116172512e18SViacheslav Ovsiienko 1162af75078fSIntel /* 1163af75078fSIntel * Configuration initialisation done once at init time. 1164af75078fSIntel */ 1165401b744dSShahaf Shuler static struct rte_mempool * 1166af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, 116726cbb419SViacheslav Ovsiienko unsigned int socket_id, uint16_t size_idx) 1168af75078fSIntel { 1169af75078fSIntel char pool_name[RTE_MEMPOOL_NAMESIZE]; 1170bece7b6cSChristian Ehrhardt struct rte_mempool *rte_mp = NULL; 1171761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 1172af75078fSIntel uint32_t mb_size; 1173af75078fSIntel 1174dfb03bbeSOlivier Matz mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; 1175761f7ae1SJie Zhou #endif 117626cbb419SViacheslav Ovsiienko mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx); 1177a550baf2SMin Hu (Connor) if (!is_proc_primary()) { 1178a550baf2SMin Hu (Connor) rte_mp = rte_mempool_lookup(pool_name); 1179a550baf2SMin Hu (Connor) if (rte_mp == NULL) 1180a550baf2SMin Hu (Connor) rte_exit(EXIT_FAILURE, 1181a550baf2SMin Hu (Connor) "Get mbuf pool for socket %u failed: %s\n", 1182a550baf2SMin Hu (Connor) socket_id, rte_strerror(rte_errno)); 1183a550baf2SMin Hu (Connor) return rte_mp; 1184a550baf2SMin Hu (Connor) } 1185148f963fSBruce Richardson 1186285fd101SOlivier Matz TESTPMD_LOG(INFO, 1187d1eb542eSOlivier Matz "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", 1188d1eb542eSOlivier Matz pool_name, nb_mbuf, mbuf_seg_size, socket_id); 1189d1eb542eSOlivier Matz 1190c7f5dba7SAnatoly Burakov switch (mp_alloc_type) { 1191c7f5dba7SAnatoly Burakov case MP_ALLOC_NATIVE: 1192c7f5dba7SAnatoly Burakov { 1193c7f5dba7SAnatoly Burakov /* wrapper to rte_mempool_create() */ 1194c7f5dba7SAnatoly Burakov TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 1195c7f5dba7SAnatoly Burakov rte_mbuf_best_mempool_ops()); 1196c7f5dba7SAnatoly Burakov rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1197c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, socket_id); 1198c7f5dba7SAnatoly Burakov break; 1199c7f5dba7SAnatoly Burakov } 1200761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 1201c7f5dba7SAnatoly Burakov case MP_ALLOC_ANON: 1202c7f5dba7SAnatoly Burakov { 1203b19a0c75SOlivier Matz rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, 1204c7f5dba7SAnatoly Burakov mb_size, (unsigned int) mb_mempool_cache, 1205148f963fSBruce Richardson sizeof(struct rte_pktmbuf_pool_private), 120659fcf854SShahaf Shuler socket_id, mempool_flags); 120724427bb9SOlivier Matz if (rte_mp == NULL) 120824427bb9SOlivier Matz goto err; 1209b19a0c75SOlivier Matz 1210b19a0c75SOlivier Matz if (rte_mempool_populate_anon(rte_mp) == 0) { 1211b19a0c75SOlivier Matz rte_mempool_free(rte_mp); 1212b19a0c75SOlivier Matz rte_mp = NULL; 121324427bb9SOlivier Matz goto err; 1214b19a0c75SOlivier Matz } 1215b19a0c75SOlivier Matz rte_pktmbuf_pool_init(rte_mp, NULL); 1216b19a0c75SOlivier Matz rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); 12173a0968c8SShahaf Shuler rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL); 1218c7f5dba7SAnatoly Burakov break; 1219c7f5dba7SAnatoly Burakov } 1220c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM: 1221c7f5dba7SAnatoly Burakov case MP_ALLOC_XMEM_HUGE: 1222c7f5dba7SAnatoly Burakov { 1223c7f5dba7SAnatoly Burakov int heap_socket; 1224c7f5dba7SAnatoly Burakov bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; 1225c7f5dba7SAnatoly Burakov 1226c7f5dba7SAnatoly Burakov if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) 1227c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not create external memory\n"); 1228c7f5dba7SAnatoly Burakov 1229c7f5dba7SAnatoly Burakov heap_socket = 1230c7f5dba7SAnatoly Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1231c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1232c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1233c7f5dba7SAnatoly Burakov 12340e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 12350e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1236ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1237c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1238c7f5dba7SAnatoly Burakov heap_socket); 1239c7f5dba7SAnatoly Burakov break; 1240c7f5dba7SAnatoly Burakov } 1241761f7ae1SJie Zhou #endif 124272512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 124372512e18SViacheslav Ovsiienko { 124472512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 124572512e18SViacheslav Ovsiienko unsigned int ext_num; 124672512e18SViacheslav Ovsiienko 124772512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 124872512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 124972512e18SViacheslav Ovsiienko if (ext_num == 0) 125072512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 125172512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 125272512e18SViacheslav Ovsiienko 125372512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 125472512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 125572512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 125672512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 125772512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 125872512e18SViacheslav Ovsiienko ext_mem, ext_num); 125972512e18SViacheslav Ovsiienko free(ext_mem); 126072512e18SViacheslav Ovsiienko break; 126172512e18SViacheslav Ovsiienko } 1262c7f5dba7SAnatoly Burakov default: 1263c7f5dba7SAnatoly Burakov { 1264c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1265c7f5dba7SAnatoly Burakov } 1266bece7b6cSChristian Ehrhardt } 1267148f963fSBruce Richardson 1268761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 126924427bb9SOlivier Matz err: 1270761f7ae1SJie Zhou #endif 1271af75078fSIntel if (rte_mp == NULL) { 1272d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1273d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1274d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1275148f963fSBruce Richardson } else if (verbose_level > 0) { 1276591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1277af75078fSIntel } 1278401b744dSShahaf Shuler return rte_mp; 1279af75078fSIntel } 1280af75078fSIntel 128120a0286fSLiu Xiaofeng /* 128220a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 128320a0286fSLiu Xiaofeng * if valid, return 0, else return -1 128420a0286fSLiu Xiaofeng */ 128520a0286fSLiu Xiaofeng static int 128620a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 128720a0286fSLiu Xiaofeng { 128820a0286fSLiu Xiaofeng static int warning_once = 0; 128920a0286fSLiu Xiaofeng 1290c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 129120a0286fSLiu Xiaofeng if (!warning_once && numa_support) 129261a3b0e5SAndrew Rybchenko fprintf(stderr, 129361a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 129420a0286fSLiu Xiaofeng warning_once = 1; 129520a0286fSLiu Xiaofeng return -1; 129620a0286fSLiu Xiaofeng } 129720a0286fSLiu Xiaofeng return 0; 129820a0286fSLiu Xiaofeng } 129920a0286fSLiu Xiaofeng 13003f7311baSWei Dai /* 13013f7311baSWei Dai * Get the allowed maximum number of RX queues. 13023f7311baSWei Dai * *pid return the port id which has minimal value of 13033f7311baSWei Dai * max_rx_queues in all ports. 13043f7311baSWei Dai */ 13053f7311baSWei Dai queueid_t 13063f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 13073f7311baSWei Dai { 13089e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; 13096f51deb9SIvan Ilchenko bool max_rxq_valid = false; 13103f7311baSWei Dai portid_t pi; 13113f7311baSWei Dai struct rte_eth_dev_info dev_info; 13123f7311baSWei Dai 13133f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13146f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13156f51deb9SIvan Ilchenko continue; 13166f51deb9SIvan Ilchenko 13176f51deb9SIvan Ilchenko max_rxq_valid = true; 13183f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 13193f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 13203f7311baSWei Dai *pid = pi; 13213f7311baSWei Dai } 13223f7311baSWei Dai } 13236f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0; 13243f7311baSWei Dai } 13253f7311baSWei Dai 13263f7311baSWei Dai /* 13273f7311baSWei Dai * Check input rxq is valid or not. 13283f7311baSWei Dai * If input rxq is not greater than any of maximum number 13293f7311baSWei Dai * of RX queues of all ports, it is valid. 13303f7311baSWei Dai * if valid, return 0, else return -1 13313f7311baSWei Dai */ 13323f7311baSWei Dai int 13333f7311baSWei Dai check_nb_rxq(queueid_t rxq) 13343f7311baSWei Dai { 13353f7311baSWei Dai queueid_t allowed_max_rxq; 13363f7311baSWei Dai portid_t pid = 0; 13373f7311baSWei Dai 13383f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 13393f7311baSWei Dai if (rxq > allowed_max_rxq) { 134061a3b0e5SAndrew Rybchenko fprintf(stderr, 134161a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n", 134261a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid); 13433f7311baSWei Dai return -1; 13443f7311baSWei Dai } 13453f7311baSWei Dai return 0; 13463f7311baSWei Dai } 13473f7311baSWei Dai 134836db4f6cSWei Dai /* 134936db4f6cSWei Dai * Get the allowed maximum number of TX queues. 135036db4f6cSWei Dai * *pid return the port id which has minimal value of 135136db4f6cSWei Dai * max_tx_queues in all ports. 135236db4f6cSWei Dai */ 135336db4f6cSWei Dai queueid_t 135436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 135536db4f6cSWei Dai { 13569e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; 13576f51deb9SIvan Ilchenko bool max_txq_valid = false; 135836db4f6cSWei Dai portid_t pi; 135936db4f6cSWei Dai struct rte_eth_dev_info dev_info; 136036db4f6cSWei Dai 136136db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13626f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13636f51deb9SIvan Ilchenko continue; 13646f51deb9SIvan Ilchenko 13656f51deb9SIvan Ilchenko max_txq_valid = true; 136636db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 136736db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 136836db4f6cSWei Dai *pid = pi; 136936db4f6cSWei Dai } 137036db4f6cSWei Dai } 13716f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0; 137236db4f6cSWei Dai } 137336db4f6cSWei Dai 137436db4f6cSWei Dai /* 137536db4f6cSWei Dai * Check input txq is valid or not. 137636db4f6cSWei Dai * If input txq is not greater than any of maximum number 137736db4f6cSWei Dai * of TX queues of all ports, it is valid. 137836db4f6cSWei Dai * if valid, return 0, else return -1 137936db4f6cSWei Dai */ 138036db4f6cSWei Dai int 138136db4f6cSWei Dai check_nb_txq(queueid_t txq) 138236db4f6cSWei Dai { 138336db4f6cSWei Dai queueid_t allowed_max_txq; 138436db4f6cSWei Dai portid_t pid = 0; 138536db4f6cSWei Dai 138636db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 138736db4f6cSWei Dai if (txq > allowed_max_txq) { 138861a3b0e5SAndrew Rybchenko fprintf(stderr, 138961a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n", 139061a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid); 139136db4f6cSWei Dai return -1; 139236db4f6cSWei Dai } 139336db4f6cSWei Dai return 0; 139436db4f6cSWei Dai } 139536db4f6cSWei Dai 13961c69df45SOri Kam /* 139799e040d3SLijun Ou * Get the allowed maximum number of RXDs of every rx queue. 139899e040d3SLijun Ou * *pid return the port id which has minimal value of 139999e040d3SLijun Ou * max_rxd in all queues of all ports. 140099e040d3SLijun Ou */ 140199e040d3SLijun Ou static uint16_t 140299e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid) 140399e040d3SLijun Ou { 140499e040d3SLijun Ou uint16_t allowed_max_rxd = UINT16_MAX; 140599e040d3SLijun Ou portid_t pi; 140699e040d3SLijun Ou struct rte_eth_dev_info dev_info; 140799e040d3SLijun Ou 140899e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 140999e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 141099e040d3SLijun Ou continue; 141199e040d3SLijun Ou 141299e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) { 141399e040d3SLijun Ou allowed_max_rxd = dev_info.rx_desc_lim.nb_max; 141499e040d3SLijun Ou *pid = pi; 141599e040d3SLijun Ou } 141699e040d3SLijun Ou } 141799e040d3SLijun Ou return allowed_max_rxd; 141899e040d3SLijun Ou } 141999e040d3SLijun Ou 142099e040d3SLijun Ou /* 142199e040d3SLijun Ou * Get the allowed minimal number of RXDs of every rx queue. 142299e040d3SLijun Ou * *pid return the port id which has minimal value of 142399e040d3SLijun Ou * min_rxd in all queues of all ports. 142499e040d3SLijun Ou */ 142599e040d3SLijun Ou static uint16_t 142699e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid) 142799e040d3SLijun Ou { 142899e040d3SLijun Ou uint16_t allowed_min_rxd = 0; 142999e040d3SLijun Ou portid_t pi; 143099e040d3SLijun Ou struct rte_eth_dev_info dev_info; 143199e040d3SLijun Ou 143299e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 143399e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 143499e040d3SLijun Ou continue; 143599e040d3SLijun Ou 143699e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) { 143799e040d3SLijun Ou allowed_min_rxd = dev_info.rx_desc_lim.nb_min; 143899e040d3SLijun Ou *pid = pi; 143999e040d3SLijun Ou } 144099e040d3SLijun Ou } 144199e040d3SLijun Ou 144299e040d3SLijun Ou return allowed_min_rxd; 144399e040d3SLijun Ou } 144499e040d3SLijun Ou 144599e040d3SLijun Ou /* 144699e040d3SLijun Ou * Check input rxd is valid or not. 144799e040d3SLijun Ou * If input rxd is not greater than any of maximum number 144899e040d3SLijun Ou * of RXDs of every Rx queues and is not less than any of 144999e040d3SLijun Ou * minimal number of RXDs of every Rx queues, it is valid. 145099e040d3SLijun Ou * if valid, return 0, else return -1 145199e040d3SLijun Ou */ 145299e040d3SLijun Ou int 145399e040d3SLijun Ou check_nb_rxd(queueid_t rxd) 145499e040d3SLijun Ou { 145599e040d3SLijun Ou uint16_t allowed_max_rxd; 145699e040d3SLijun Ou uint16_t allowed_min_rxd; 145799e040d3SLijun Ou portid_t pid = 0; 145899e040d3SLijun Ou 145999e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid); 146099e040d3SLijun Ou if (rxd > allowed_max_rxd) { 146161a3b0e5SAndrew Rybchenko fprintf(stderr, 146261a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n", 146361a3b0e5SAndrew Rybchenko rxd, allowed_max_rxd, pid); 146499e040d3SLijun Ou return -1; 146599e040d3SLijun Ou } 146699e040d3SLijun Ou 146799e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid); 146899e040d3SLijun Ou if (rxd < allowed_min_rxd) { 146961a3b0e5SAndrew Rybchenko fprintf(stderr, 147061a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n", 147161a3b0e5SAndrew Rybchenko rxd, allowed_min_rxd, pid); 147299e040d3SLijun Ou return -1; 147399e040d3SLijun Ou } 147499e040d3SLijun Ou 147599e040d3SLijun Ou return 0; 147699e040d3SLijun Ou } 147799e040d3SLijun Ou 147899e040d3SLijun Ou /* 147999e040d3SLijun Ou * Get the allowed maximum number of TXDs of every rx queues. 148099e040d3SLijun Ou * *pid return the port id which has minimal value of 148199e040d3SLijun Ou * max_txd in every tx queue. 148299e040d3SLijun Ou */ 148399e040d3SLijun Ou static uint16_t 148499e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid) 148599e040d3SLijun Ou { 148699e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX; 148799e040d3SLijun Ou portid_t pi; 148899e040d3SLijun Ou struct rte_eth_dev_info dev_info; 148999e040d3SLijun Ou 149099e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 149199e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 149299e040d3SLijun Ou continue; 149399e040d3SLijun Ou 149499e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) { 149599e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max; 149699e040d3SLijun Ou *pid = pi; 149799e040d3SLijun Ou } 149899e040d3SLijun Ou } 149999e040d3SLijun Ou return allowed_max_txd; 150099e040d3SLijun Ou } 150199e040d3SLijun Ou 150299e040d3SLijun Ou /* 150399e040d3SLijun Ou * Get the allowed maximum number of TXDs of every tx queues. 150499e040d3SLijun Ou * *pid return the port id which has minimal value of 150599e040d3SLijun Ou * min_txd in every tx queue. 150699e040d3SLijun Ou */ 150799e040d3SLijun Ou static uint16_t 150899e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid) 150999e040d3SLijun Ou { 151099e040d3SLijun Ou uint16_t allowed_min_txd = 0; 151199e040d3SLijun Ou portid_t pi; 151299e040d3SLijun Ou struct rte_eth_dev_info dev_info; 151399e040d3SLijun Ou 151499e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 151599e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 151699e040d3SLijun Ou continue; 151799e040d3SLijun Ou 151899e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) { 151999e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min; 152099e040d3SLijun Ou *pid = pi; 152199e040d3SLijun Ou } 152299e040d3SLijun Ou } 152399e040d3SLijun Ou 152499e040d3SLijun Ou return allowed_min_txd; 152599e040d3SLijun Ou } 152699e040d3SLijun Ou 152799e040d3SLijun Ou /* 152899e040d3SLijun Ou * Check input txd is valid or not. 152999e040d3SLijun Ou * If input txd is not greater than any of maximum number 153099e040d3SLijun Ou * of TXDs of every Rx queues, it is valid. 153199e040d3SLijun Ou * if valid, return 0, else return -1 153299e040d3SLijun Ou */ 153399e040d3SLijun Ou int 153499e040d3SLijun Ou check_nb_txd(queueid_t txd) 153599e040d3SLijun Ou { 153699e040d3SLijun Ou uint16_t allowed_max_txd; 153799e040d3SLijun Ou uint16_t allowed_min_txd; 153899e040d3SLijun Ou portid_t pid = 0; 153999e040d3SLijun Ou 154099e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid); 154199e040d3SLijun Ou if (txd > allowed_max_txd) { 154261a3b0e5SAndrew Rybchenko fprintf(stderr, 154361a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n", 154461a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid); 154599e040d3SLijun Ou return -1; 154699e040d3SLijun Ou } 154799e040d3SLijun Ou 154899e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid); 154999e040d3SLijun Ou if (txd < allowed_min_txd) { 155061a3b0e5SAndrew Rybchenko fprintf(stderr, 155161a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n", 155261a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid); 155399e040d3SLijun Ou return -1; 155499e040d3SLijun Ou } 155599e040d3SLijun Ou return 0; 155699e040d3SLijun Ou } 155799e040d3SLijun Ou 155899e040d3SLijun Ou 155999e040d3SLijun Ou /* 15601c69df45SOri Kam * Get the allowed maximum number of hairpin queues. 15611c69df45SOri Kam * *pid return the port id which has minimal value of 15621c69df45SOri Kam * max_hairpin_queues in all ports. 15631c69df45SOri Kam */ 15641c69df45SOri Kam queueid_t 15651c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid) 15661c69df45SOri Kam { 15679e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; 15681c69df45SOri Kam portid_t pi; 15691c69df45SOri Kam struct rte_eth_hairpin_cap cap; 15701c69df45SOri Kam 15711c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) { 15721c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { 15731c69df45SOri Kam *pid = pi; 15741c69df45SOri Kam return 0; 15751c69df45SOri Kam } 15761c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) { 15771c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues; 15781c69df45SOri Kam *pid = pi; 15791c69df45SOri Kam } 15801c69df45SOri Kam } 15811c69df45SOri Kam return allowed_max_hairpinq; 15821c69df45SOri Kam } 15831c69df45SOri Kam 15841c69df45SOri Kam /* 15851c69df45SOri Kam * Check input hairpin is valid or not. 15861c69df45SOri Kam * If input hairpin is not greater than any of maximum number 15871c69df45SOri Kam * of hairpin queues of all ports, it is valid. 15881c69df45SOri Kam * if valid, return 0, else return -1 15891c69df45SOri Kam */ 15901c69df45SOri Kam int 15911c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15921c69df45SOri Kam { 15931c69df45SOri Kam queueid_t allowed_max_hairpinq; 15941c69df45SOri Kam portid_t pid = 0; 15951c69df45SOri Kam 15961c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15971c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 159861a3b0e5SAndrew Rybchenko fprintf(stderr, 159961a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 16001c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 16011c69df45SOri Kam return -1; 16021c69df45SOri Kam } 16031c69df45SOri Kam return 0; 16041c69df45SOri Kam } 16051c69df45SOri Kam 16061bb4a528SFerruh Yigit static int 16071bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 16081bb4a528SFerruh Yigit { 16091bb4a528SFerruh Yigit uint32_t eth_overhead; 16101bb4a528SFerruh Yigit 16111bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 16121bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 16131bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 16141bb4a528SFerruh Yigit else 16151bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 16161bb4a528SFerruh Yigit 16171bb4a528SFerruh Yigit return eth_overhead; 16181bb4a528SFerruh Yigit } 16191bb4a528SFerruh Yigit 1620af75078fSIntel static void 1621b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1622b6b8a1ebSViacheslav Ovsiienko { 1623b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1624b6b8a1ebSViacheslav Ovsiienko int ret; 1625b6b8a1ebSViacheslav Ovsiienko int i; 1626b6b8a1ebSViacheslav Ovsiienko 1627f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 1628f6d8a6d3SIvan Malov 1629b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1630b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1631b6b8a1ebSViacheslav Ovsiienko 1632b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1633b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1634b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1635b6b8a1ebSViacheslav Ovsiienko 1636295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1637b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1638295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1639b6b8a1ebSViacheslav Ovsiienko 1640b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1641b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 16423c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads; 1643b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1644b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 16453c4426dbSDmitry Kozlyuk port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 1646b6b8a1ebSViacheslav Ovsiienko 1647b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1648b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1649b6b8a1ebSViacheslav Ovsiienko 16501bb4a528SFerruh Yigit if (max_rx_pkt_len) 16511bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16521bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 16531bb4a528SFerruh Yigit 1654b6b8a1ebSViacheslav Ovsiienko /* set flag to initialize port/queue */ 1655b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1656b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1657b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1658b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1659b6b8a1ebSViacheslav Ovsiienko 1660b6b8a1ebSViacheslav Ovsiienko /* 1661b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1662b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1663b6b8a1ebSViacheslav Ovsiienko */ 1664b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1665b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16661bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16671bb4a528SFerruh Yigit uint16_t mtu; 1668b6b8a1ebSViacheslav Ovsiienko 16691bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16701bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16711bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16721bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16731bb4a528SFerruh Yigit 16741bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16751bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1676b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1677b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1678b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1679b6b8a1ebSViacheslav Ovsiienko } 1680b6b8a1ebSViacheslav Ovsiienko } 1681b6b8a1ebSViacheslav Ovsiienko } 16821bb4a528SFerruh Yigit } 1683b6b8a1ebSViacheslav Ovsiienko 1684b6b8a1ebSViacheslav Ovsiienko static void 1685af75078fSIntel init_config(void) 1686af75078fSIntel { 1687ce8d5614SIntel portid_t pid; 1688af75078fSIntel struct rte_mempool *mbp; 1689af75078fSIntel unsigned int nb_mbuf_per_pool; 1690af75078fSIntel lcoreid_t lc_id; 16916970401eSDavid Marchand #ifdef RTE_LIB_GRO 1692b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16936970401eSDavid Marchand #endif 16946970401eSDavid Marchand #ifdef RTE_LIB_GSO 169552f38a20SJiayu Hu uint32_t gso_types; 16966970401eSDavid Marchand #endif 1697487f9a59SYulong Pei 1698af75078fSIntel /* Configuration of logical cores. */ 1699af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1700af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1701fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1702af75078fSIntel if (fwd_lcores == NULL) { 1703ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1704ce8d5614SIntel "failed\n", nb_lcores); 1705af75078fSIntel } 1706af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1707af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1708af75078fSIntel sizeof(struct fwd_lcore), 1709fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1710af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1711ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1712ce8d5614SIntel "failed\n"); 1713af75078fSIntel } 1714af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1715af75078fSIntel } 1716af75078fSIntel 17177d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1718b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 17196f51deb9SIvan Ilchenko 1720b6ea6408SIntel if (numa_support) { 1721b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1722b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1723b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 172420a0286fSLiu Xiaofeng 172529841336SPhil Yang /* 172629841336SPhil Yang * if socket_id is invalid, 172729841336SPhil Yang * set to the first available socket. 172829841336SPhil Yang */ 172920a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 173029841336SPhil Yang socket_id = socket_ids[0]; 1731b6ea6408SIntel } 1732b6b8a1ebSViacheslav Ovsiienko } else { 1733b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1734b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1735b6ea6408SIntel } 1736b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1737b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1738ce8d5614SIntel } 17393ab64341SOlivier Matz /* 17403ab64341SOlivier Matz * Create pools of mbuf. 17413ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 17423ab64341SOlivier Matz * socket 0 memory by default. 17433ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 17443ab64341SOlivier Matz * 17453ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 17463ab64341SOlivier Matz * nb_txd can be configured at run time. 17473ab64341SOlivier Matz */ 17483ab64341SOlivier Matz if (param_total_num_mbufs) 17493ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17503ab64341SOlivier Matz else { 17514ed89049SDavid Marchand nb_mbuf_per_pool = RX_DESC_MAX + 17523ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17534ed89049SDavid Marchand TX_DESC_MAX + MAX_PKT_BURST; 17543ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17553ab64341SOlivier Matz } 17563ab64341SOlivier Matz 1757b6ea6408SIntel if (numa_support) { 175826cbb419SViacheslav Ovsiienko uint8_t i, j; 1759ce8d5614SIntel 1760c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 176126cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 176226cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 176326cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1764401b744dSShahaf Shuler nb_mbuf_per_pool, 176526cbb419SViacheslav Ovsiienko socket_ids[i], j); 17663ab64341SOlivier Matz } else { 176726cbb419SViacheslav Ovsiienko uint8_t i; 176826cbb419SViacheslav Ovsiienko 176926cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 177026cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 177126cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1772401b744dSShahaf Shuler nb_mbuf_per_pool, 177326cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 177426cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17753ab64341SOlivier Matz } 1776b6ea6408SIntel 1777b6ea6408SIntel init_port_config(); 17785886ae07SAdrien Mazarguil 17796970401eSDavid Marchand #ifdef RTE_LIB_GSO 1780295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1781295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17826970401eSDavid Marchand #endif 17835886ae07SAdrien Mazarguil /* 17845886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 17855886ae07SAdrien Mazarguil */ 17865886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17878fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 178826cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17898fd8bebcSAdrien Mazarguil 17905886ae07SAdrien Mazarguil if (mbp == NULL) 179126cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17925886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17936970401eSDavid Marchand #ifdef RTE_LIB_GSO 179452f38a20SJiayu Hu /* initialize GSO context */ 179552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 179652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 179752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 179835b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 179935b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 180052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 18016970401eSDavid Marchand #endif 18025886ae07SAdrien Mazarguil } 18035886ae07SAdrien Mazarguil 18040c0db76fSBernard Iremonger fwd_config_setup(); 1805b7091f1dSJiayu Hu 18066970401eSDavid Marchand #ifdef RTE_LIB_GRO 1807b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1808b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1809b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1810b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1811b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1812b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1813b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1814b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1815b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1816b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1817b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1818b7091f1dSJiayu Hu } 1819b7091f1dSJiayu Hu } 18206970401eSDavid Marchand #endif 1821ce8d5614SIntel } 1822ce8d5614SIntel 18232950a769SDeclan Doherty 18242950a769SDeclan Doherty void 1825a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 18262950a769SDeclan Doherty { 18272950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 1828b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 18292950a769SDeclan Doherty init_port_config(); 18302950a769SDeclan Doherty } 18312950a769SDeclan Doherty 1832ce8d5614SIntel int 1833ce8d5614SIntel init_fwd_streams(void) 1834ce8d5614SIntel { 1835ce8d5614SIntel portid_t pid; 1836ce8d5614SIntel struct rte_port *port; 1837ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 18385a8fb55cSReshma Pattan queueid_t q; 1839ce8d5614SIntel 1840ce8d5614SIntel /* set socket id according to numa or not */ 18417d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1842ce8d5614SIntel port = &ports[pid]; 1843ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 184461a3b0e5SAndrew Rybchenko fprintf(stderr, 184561a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 184661a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1847ce8d5614SIntel return -1; 1848ce8d5614SIntel } 1849ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 185061a3b0e5SAndrew Rybchenko fprintf(stderr, 185161a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 185261a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1853ce8d5614SIntel return -1; 1854ce8d5614SIntel } 185520a0286fSLiu Xiaofeng if (numa_support) { 185620a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 185720a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 185820a0286fSLiu Xiaofeng else { 1859b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 186020a0286fSLiu Xiaofeng 186129841336SPhil Yang /* 186229841336SPhil Yang * if socket_id is invalid, 186329841336SPhil Yang * set to the first available socket. 186429841336SPhil Yang */ 186520a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 186629841336SPhil Yang port->socket_id = socket_ids[0]; 186720a0286fSLiu Xiaofeng } 186820a0286fSLiu Xiaofeng } 1869b6ea6408SIntel else { 1870b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1871af75078fSIntel port->socket_id = 0; 1872b6ea6408SIntel else 1873b6ea6408SIntel port->socket_id = socket_num; 1874b6ea6408SIntel } 1875af75078fSIntel } 1876af75078fSIntel 18775a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18785a8fb55cSReshma Pattan if (q == 0) { 187961a3b0e5SAndrew Rybchenko fprintf(stderr, 188061a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18815a8fb55cSReshma Pattan return -1; 18825a8fb55cSReshma Pattan } 18835a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1884ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1885ce8d5614SIntel return 0; 1886ce8d5614SIntel /* clear the old */ 1887ce8d5614SIntel if (fwd_streams != NULL) { 1888ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1889ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1890ce8d5614SIntel continue; 1891ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1892ce8d5614SIntel fwd_streams[sm_id] = NULL; 1893af75078fSIntel } 1894ce8d5614SIntel rte_free(fwd_streams); 1895ce8d5614SIntel fwd_streams = NULL; 1896ce8d5614SIntel } 1897ce8d5614SIntel 1898ce8d5614SIntel /* init new */ 1899ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 19001f84c469SMatan Azrad if (nb_fwd_streams) { 1901ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 19021f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 19031f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1904ce8d5614SIntel if (fwd_streams == NULL) 19051f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 19061f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 19071f84c469SMatan Azrad nb_fwd_streams); 1908ce8d5614SIntel 1909af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 19101f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 19111f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 19121f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1913ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 19141f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 19151f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 19161f84c469SMatan Azrad } 1917af75078fSIntel } 1918ce8d5614SIntel 1919ce8d5614SIntel return 0; 1920af75078fSIntel } 1921af75078fSIntel 1922af75078fSIntel static void 1923af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1924af75078fSIntel { 19257569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 192685de481aSHonnappa Nagarahalli uint64_t nb_burst; 19277569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 19287569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1929af75078fSIntel uint16_t nb_pkt; 19307569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 19317569b8c1SHonnappa Nagarahalli int i; 1932af75078fSIntel 1933af75078fSIntel /* 1934af75078fSIntel * First compute the total number of packet bursts and the 1935af75078fSIntel * two highest numbers of bursts of the same number of packets. 1936af75078fSIntel */ 19377569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 19387569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 19397569b8c1SHonnappa Nagarahalli 19407569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 19417569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 19427569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 19437569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 19447569b8c1SHonnappa Nagarahalli 19457569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. */ 19466a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1947af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 19487569b8c1SHonnappa Nagarahalli 1949af75078fSIntel if (nb_burst == 0) 1950af75078fSIntel continue; 19517569b8c1SHonnappa Nagarahalli 1952af75078fSIntel total_burst += nb_burst; 19537569b8c1SHonnappa Nagarahalli 19547569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19557569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19567569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1957fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1958fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19597569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19607569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19617569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1962af75078fSIntel } 1963af75078fSIntel } 1964af75078fSIntel if (total_burst == 0) 1965af75078fSIntel return; 19667569b8c1SHonnappa Nagarahalli 19677569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19687569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19697569b8c1SHonnappa Nagarahalli if (i == 3) { 19707569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1971af75078fSIntel return; 1972af75078fSIntel } 19737569b8c1SHonnappa Nagarahalli 19747569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19757569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19767569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19777569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1978af75078fSIntel return; 1979af75078fSIntel } 19807569b8c1SHonnappa Nagarahalli 19817569b8c1SHonnappa Nagarahalli burst_percent[i] = 19827569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19837569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19847569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19857569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1986af75078fSIntel } 1987af75078fSIntel } 1988af75078fSIntel 1989af75078fSIntel static void 1990af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1991af75078fSIntel { 1992af75078fSIntel struct fwd_stream *fs; 1993af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1994af75078fSIntel 1995af75078fSIntel fs = fwd_streams[stream_id]; 1996af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1997af75078fSIntel (fs->fwd_dropped == 0)) 1998af75078fSIntel return; 1999af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 2000af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 2001af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 2002af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 2003c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 2004c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 2005af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 2006af75078fSIntel 2007af75078fSIntel /* if checksum mode */ 2008af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 2009c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 2010c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 2011c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 201258d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 201358d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 2014d139cf23SLance Richardson printf(" RX- bad outer IP checksum: %-14"PRIu64"\n", 2015d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 201694d65546SDavid Marchand } else { 201794d65546SDavid Marchand printf("\n"); 2018af75078fSIntel } 2019af75078fSIntel 20200e4b1963SDharmik Thakkar if (record_burst_stats) { 2021af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 2022af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 20230e4b1963SDharmik Thakkar } 2024af75078fSIntel } 2025af75078fSIntel 202653324971SDavid Marchand void 202753324971SDavid Marchand fwd_stats_display(void) 202853324971SDavid Marchand { 202953324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 203053324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 203153324971SDavid Marchand struct { 203253324971SDavid Marchand struct fwd_stream *rx_stream; 203353324971SDavid Marchand struct fwd_stream *tx_stream; 203453324971SDavid Marchand uint64_t tx_dropped; 203553324971SDavid Marchand uint64_t rx_bad_ip_csum; 203653324971SDavid Marchand uint64_t rx_bad_l4_csum; 203753324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 2038d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 203953324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 204053324971SDavid Marchand uint64_t total_rx_dropped = 0; 204153324971SDavid Marchand uint64_t total_tx_dropped = 0; 204253324971SDavid Marchand uint64_t total_rx_nombuf = 0; 204353324971SDavid Marchand struct rte_eth_stats stats; 204453324971SDavid Marchand uint64_t fwd_cycles = 0; 204553324971SDavid Marchand uint64_t total_recv = 0; 204653324971SDavid Marchand uint64_t total_xmit = 0; 204753324971SDavid Marchand struct rte_port *port; 204853324971SDavid Marchand streamid_t sm_id; 204953324971SDavid Marchand portid_t pt_id; 2050baef6bbfSMin Hu (Connor) int ret; 205153324971SDavid Marchand int i; 205253324971SDavid Marchand 205353324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 205453324971SDavid Marchand 205553324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 205653324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 205753324971SDavid Marchand 205853324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 205953324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 206053324971SDavid Marchand fwd_stream_stats_display(sm_id); 206153324971SDavid Marchand } else { 206253324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 206353324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 206453324971SDavid Marchand } 206553324971SDavid Marchand 206653324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 206753324971SDavid Marchand 206853324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 206953324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 207053324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 207153324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2072d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2073d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 207453324971SDavid Marchand 2075bc700b67SDharmik Thakkar if (record_core_cycles) 207699a4974aSRobin Jarry fwd_cycles += fs->busy_cycles; 207753324971SDavid Marchand } 207853324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2079c3fd1e60SFerruh Yigit uint64_t tx_dropped = 0; 2080c3fd1e60SFerruh Yigit 208153324971SDavid Marchand pt_id = fwd_ports_ids[i]; 208253324971SDavid Marchand port = &ports[pt_id]; 208353324971SDavid Marchand 2084baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &stats); 2085baef6bbfSMin Hu (Connor) if (ret != 0) { 2086baef6bbfSMin Hu (Connor) fprintf(stderr, 2087baef6bbfSMin Hu (Connor) "%s: Error: failed to get stats (port %u): %d", 2088baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 2089baef6bbfSMin Hu (Connor) continue; 2090baef6bbfSMin Hu (Connor) } 209153324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 209253324971SDavid Marchand stats.opackets -= port->stats.opackets; 209353324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 209453324971SDavid Marchand stats.obytes -= port->stats.obytes; 209553324971SDavid Marchand stats.imissed -= port->stats.imissed; 209653324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 209753324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 209853324971SDavid Marchand 209953324971SDavid Marchand total_recv += stats.ipackets; 210053324971SDavid Marchand total_xmit += stats.opackets; 210153324971SDavid Marchand total_rx_dropped += stats.imissed; 2102c3fd1e60SFerruh Yigit tx_dropped += ports_stats[pt_id].tx_dropped; 2103c3fd1e60SFerruh Yigit tx_dropped += stats.oerrors; 2104c3fd1e60SFerruh Yigit total_tx_dropped += tx_dropped; 210553324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 210653324971SDavid Marchand 210753324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 210853324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 210953324971SDavid Marchand 211008dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 211108dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 211253324971SDavid Marchand stats.ipackets + stats.imissed); 211353324971SDavid Marchand 2114d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 211553324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 211653324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 211753324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 211853324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 211953324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 212053324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2121d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2122d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2123d139cf23SLance Richardson } 212453324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 212508dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 212608dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 212753324971SDavid Marchand } 212853324971SDavid Marchand 212908dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 213053324971SDavid Marchand "TX-total: %-"PRIu64"\n", 2131c3fd1e60SFerruh Yigit stats.opackets, tx_dropped, 2132c3fd1e60SFerruh Yigit stats.opackets + tx_dropped); 213353324971SDavid Marchand 21340e4b1963SDharmik Thakkar if (record_burst_stats) { 213553324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 213653324971SDavid Marchand pkt_burst_stats_display("RX", 213753324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 213853324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 213953324971SDavid Marchand pkt_burst_stats_display("TX", 214053324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 21410e4b1963SDharmik Thakkar } 214253324971SDavid Marchand 214353324971SDavid Marchand printf(" %s--------------------------------%s\n", 214453324971SDavid Marchand fwd_stats_border, fwd_stats_border); 214553324971SDavid Marchand } 214653324971SDavid Marchand 214753324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 214853324971SDavid Marchand "%s\n", 214953324971SDavid Marchand acc_stats_border, acc_stats_border); 215053324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 215153324971SDavid Marchand "%-"PRIu64"\n" 215253324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 215353324971SDavid Marchand "%-"PRIu64"\n", 215453324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 215553324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 215653324971SDavid Marchand if (total_rx_nombuf > 0) 215753324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 215853324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 215953324971SDavid Marchand "%s\n", 216053324971SDavid Marchand acc_stats_border, acc_stats_border); 2161bc700b67SDharmik Thakkar if (record_core_cycles) { 21624c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21633a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21643a164e00SPhil Yang uint64_t total_pkts = 0; 21653a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21663a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21673a164e00SPhil Yang total_pkts = total_xmit; 21683a164e00SPhil Yang else 21693a164e00SPhil Yang total_pkts = total_recv; 21703a164e00SPhil Yang 217199a4974aSRobin Jarry printf("\n CPU cycles/packet=%.2F (busy cycles=" 21723a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21734c0497b1SDharmik Thakkar " MHz Clock\n", 21743a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21753a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21764c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21773a164e00SPhil Yang } 2178bc700b67SDharmik Thakkar } 217953324971SDavid Marchand } 218053324971SDavid Marchand 218153324971SDavid Marchand void 218253324971SDavid Marchand fwd_stats_reset(void) 218353324971SDavid Marchand { 218453324971SDavid Marchand streamid_t sm_id; 218553324971SDavid Marchand portid_t pt_id; 2186baef6bbfSMin Hu (Connor) int ret; 218753324971SDavid Marchand int i; 218853324971SDavid Marchand 218953324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 219053324971SDavid Marchand pt_id = fwd_ports_ids[i]; 2191baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats); 2192baef6bbfSMin Hu (Connor) if (ret != 0) 2193baef6bbfSMin Hu (Connor) fprintf(stderr, 2194baef6bbfSMin Hu (Connor) "%s: Error: failed to clear stats (port %u):%d", 2195baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 219653324971SDavid Marchand } 219753324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 219853324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 219953324971SDavid Marchand 220053324971SDavid Marchand fs->rx_packets = 0; 220153324971SDavid Marchand fs->tx_packets = 0; 220253324971SDavid Marchand fs->fwd_dropped = 0; 220353324971SDavid Marchand fs->rx_bad_ip_csum = 0; 220453324971SDavid Marchand fs->rx_bad_l4_csum = 0; 220553324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2206d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 220753324971SDavid Marchand 220853324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 220953324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 221099a4974aSRobin Jarry fs->busy_cycles = 0; 221153324971SDavid Marchand } 221253324971SDavid Marchand } 221353324971SDavid Marchand 2214af75078fSIntel static void 22157741e4cfSIntel flush_fwd_rx_queues(void) 2216af75078fSIntel { 2217af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2218af75078fSIntel portid_t rxp; 22197741e4cfSIntel portid_t port_id; 2220af75078fSIntel queueid_t rxq; 2221af75078fSIntel uint16_t nb_rx; 2222af75078fSIntel uint8_t j; 2223f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 2224594302c7SJames Poole uint64_t timer_period; 2225f487715fSReshma Pattan 2226a550baf2SMin Hu (Connor) if (num_procs > 1) { 2227a550baf2SMin Hu (Connor) printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n"); 2228a550baf2SMin Hu (Connor) return; 2229a550baf2SMin Hu (Connor) } 2230a550baf2SMin Hu (Connor) 2231f487715fSReshma Pattan /* convert to number of cycles */ 2232594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 2233af75078fSIntel 2234af75078fSIntel for (j = 0; j < 2; j++) { 22357741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 2236af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 22377741e4cfSIntel port_id = fwd_ports_ids[rxp]; 22383c4426dbSDmitry Kozlyuk 22393c4426dbSDmitry Kozlyuk /* Polling stopped queues is prohibited. */ 22403c4426dbSDmitry Kozlyuk if (ports[port_id].rxq[rxq].state == 22413c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED) 22423c4426dbSDmitry Kozlyuk continue; 22433c4426dbSDmitry Kozlyuk 2244f487715fSReshma Pattan /** 2245f487715fSReshma Pattan * testpmd can stuck in the below do while loop 2246f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero 2247f487715fSReshma Pattan * packets. So timer is added to exit this loop 2248f487715fSReshma Pattan * after 1sec timer expiry. 2249f487715fSReshma Pattan */ 2250f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 2251af75078fSIntel do { 22527741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 2253013af9b6SIntel pkts_burst, MAX_PKT_BURST); 2254d00fee5dSDavid Marchand rte_pktmbuf_free_bulk(pkts_burst, nb_rx); 2255f487715fSReshma Pattan 2256f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 2257f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 2258f487715fSReshma Pattan timer_tsc += diff_tsc; 2259f487715fSReshma Pattan } while ((nb_rx > 0) && 2260f487715fSReshma Pattan (timer_tsc < timer_period)); 2261f487715fSReshma Pattan timer_tsc = 0; 2262af75078fSIntel } 2263af75078fSIntel } 2264af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 2265af75078fSIntel } 2266af75078fSIntel } 2267af75078fSIntel 2268af75078fSIntel static void 2269af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 2270af75078fSIntel { 2271af75078fSIntel struct fwd_stream **fsm; 227299a4974aSRobin Jarry uint64_t prev_tsc; 2273af75078fSIntel streamid_t nb_fs; 2274af75078fSIntel streamid_t sm_id; 2275a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 22767e4441c8SRemy Horton uint64_t tics_per_1sec; 22777e4441c8SRemy Horton uint64_t tics_datum; 22787e4441c8SRemy Horton uint64_t tics_current; 22794918a357SXiaoyun Li uint16_t i, cnt_ports; 2280af75078fSIntel 22814918a357SXiaoyun Li cnt_ports = nb_ports; 22827e4441c8SRemy Horton tics_datum = rte_rdtsc(); 22837e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 22847e4441c8SRemy Horton #endif 2285af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 2286af75078fSIntel nb_fs = fc->stream_nb; 228799a4974aSRobin Jarry prev_tsc = rte_rdtsc(); 2288af75078fSIntel do { 228906c20561SDavid Marchand for (sm_id = 0; sm_id < nb_fs; sm_id++) { 229006c20561SDavid Marchand struct fwd_stream *fs = fsm[sm_id]; 229106c20561SDavid Marchand uint64_t start_fs_tsc = 0; 229206c20561SDavid Marchand bool busy; 229306c20561SDavid Marchand 229406c20561SDavid Marchand if (fs->disabled) 229506c20561SDavid Marchand continue; 229606c20561SDavid Marchand if (record_core_cycles) 229706c20561SDavid Marchand start_fs_tsc = rte_rdtsc(); 229806c20561SDavid Marchand busy = (*pkt_fwd)(fs); 229906c20561SDavid Marchand if (record_core_cycles && busy) 230006c20561SDavid Marchand fs->busy_cycles += rte_rdtsc() - start_fs_tsc; 230106c20561SDavid Marchand } 2302a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 2303e25e6c70SRemy Horton if (bitrate_enabled != 0 && 2304e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 23057e4441c8SRemy Horton tics_current = rte_rdtsc(); 23067e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 23077e4441c8SRemy Horton /* Periodic bitrate calculation */ 23084918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2309e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 23104918a357SXiaoyun Li ports_ids[i]); 23117e4441c8SRemy Horton tics_datum = tics_current; 23127e4441c8SRemy Horton } 2313e25e6c70SRemy Horton } 23147e4441c8SRemy Horton #endif 2315a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 231665eb1e54SPablo de Lara if (latencystats_enabled != 0 && 231765eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 231862d3216dSReshma Pattan rte_latencystats_update(); 231962d3216dSReshma Pattan #endif 232099a4974aSRobin Jarry if (record_core_cycles) { 232199a4974aSRobin Jarry uint64_t tsc = rte_rdtsc(); 232262d3216dSReshma Pattan 232399a4974aSRobin Jarry fc->total_cycles += tsc - prev_tsc; 232499a4974aSRobin Jarry prev_tsc = tsc; 232599a4974aSRobin Jarry } 2326af75078fSIntel } while (! fc->stopped); 2327af75078fSIntel } 2328af75078fSIntel 2329af75078fSIntel static int 233099a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage) 233199a4974aSRobin Jarry { 233299a4974aSRobin Jarry struct fwd_stream **fsm; 233399a4974aSRobin Jarry struct fwd_lcore *fc; 233499a4974aSRobin Jarry streamid_t nb_fs; 233599a4974aSRobin Jarry streamid_t sm_id; 233699a4974aSRobin Jarry 233799a4974aSRobin Jarry fc = lcore_to_fwd_lcore(lcore_id); 233899a4974aSRobin Jarry if (fc == NULL) 233999a4974aSRobin Jarry return -1; 234099a4974aSRobin Jarry 234199a4974aSRobin Jarry fsm = &fwd_streams[fc->stream_idx]; 234299a4974aSRobin Jarry nb_fs = fc->stream_nb; 234399a4974aSRobin Jarry usage->busy_cycles = 0; 234499a4974aSRobin Jarry usage->total_cycles = fc->total_cycles; 234599a4974aSRobin Jarry 234699a4974aSRobin Jarry for (sm_id = 0; sm_id < nb_fs; sm_id++) { 234799a4974aSRobin Jarry if (!fsm[sm_id]->disabled) 234899a4974aSRobin Jarry usage->busy_cycles += fsm[sm_id]->busy_cycles; 234999a4974aSRobin Jarry } 235099a4974aSRobin Jarry 235199a4974aSRobin Jarry return 0; 235299a4974aSRobin Jarry } 235399a4974aSRobin Jarry 235499a4974aSRobin Jarry static int 2355af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2356af75078fSIntel { 2357af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2358af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2359af75078fSIntel return 0; 2360af75078fSIntel } 2361af75078fSIntel 2362af75078fSIntel /* 2363af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2364af75078fSIntel * Used to start communication flows in network loopback test configurations. 2365af75078fSIntel */ 2366af75078fSIntel static int 2367af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2368af75078fSIntel { 2369af75078fSIntel struct fwd_lcore *fwd_lc; 2370af75078fSIntel struct fwd_lcore tmp_lcore; 2371af75078fSIntel 2372af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2373af75078fSIntel tmp_lcore = *fwd_lc; 2374af75078fSIntel tmp_lcore.stopped = 1; 2375af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2376af75078fSIntel return 0; 2377af75078fSIntel } 2378af75078fSIntel 2379af75078fSIntel /* 2380af75078fSIntel * Launch packet forwarding: 2381af75078fSIntel * - Setup per-port forwarding context. 2382af75078fSIntel * - launch logical cores with their forwarding configuration. 2383af75078fSIntel */ 2384af75078fSIntel static void 2385af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2386af75078fSIntel { 2387af75078fSIntel unsigned int i; 2388af75078fSIntel unsigned int lc_id; 2389af75078fSIntel int diag; 2390af75078fSIntel 2391af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2392af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2393af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2394af75078fSIntel fwd_lcores[i]->stopped = 0; 2395af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2396af75078fSIntel fwd_lcores[i], lc_id); 2397af75078fSIntel if (diag != 0) 239861a3b0e5SAndrew Rybchenko fprintf(stderr, 239961a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2400af75078fSIntel lc_id, diag); 2401af75078fSIntel } 2402af75078fSIntel } 2403af75078fSIntel } 2404af75078fSIntel 2405180ba023SDavid Marchand void 2406180ba023SDavid Marchand common_fwd_stream_init(struct fwd_stream *fs) 2407180ba023SDavid Marchand { 2408180ba023SDavid Marchand bool rx_stopped, tx_stopped; 2409180ba023SDavid Marchand 2410180ba023SDavid Marchand rx_stopped = (ports[fs->rx_port].rxq[fs->rx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED); 2411180ba023SDavid Marchand tx_stopped = (ports[fs->tx_port].txq[fs->tx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED); 2412180ba023SDavid Marchand fs->disabled = rx_stopped || tx_stopped; 2413180ba023SDavid Marchand } 2414180ba023SDavid Marchand 24155028f207SShiyang He static void 24165028f207SShiyang He update_rx_queue_state(uint16_t port_id, uint16_t queue_id) 24175028f207SShiyang He { 24185028f207SShiyang He struct rte_eth_rxq_info rx_qinfo; 24195028f207SShiyang He int32_t rc; 24205028f207SShiyang He 24215028f207SShiyang He rc = rte_eth_rx_queue_info_get(port_id, 24225028f207SShiyang He queue_id, &rx_qinfo); 24235028f207SShiyang He if (rc == 0) { 24245028f207SShiyang He ports[port_id].rxq[queue_id].state = 24255028f207SShiyang He rx_qinfo.queue_state; 24265028f207SShiyang He } else if (rc == -ENOTSUP) { 24275028f207SShiyang He /* 24285028f207SShiyang He * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED 24295028f207SShiyang He * to ensure that the PMDs do not implement 24305028f207SShiyang He * rte_eth_rx_queue_info_get can forward. 24315028f207SShiyang He */ 24325028f207SShiyang He ports[port_id].rxq[queue_id].state = 24335028f207SShiyang He RTE_ETH_QUEUE_STATE_STARTED; 24345028f207SShiyang He } else { 24355028f207SShiyang He TESTPMD_LOG(WARNING, 24365028f207SShiyang He "Failed to get rx queue info\n"); 24375028f207SShiyang He } 24385028f207SShiyang He } 24395028f207SShiyang He 24405028f207SShiyang He static void 24415028f207SShiyang He update_tx_queue_state(uint16_t port_id, uint16_t queue_id) 24425028f207SShiyang He { 24435028f207SShiyang He struct rte_eth_txq_info tx_qinfo; 24445028f207SShiyang He int32_t rc; 24455028f207SShiyang He 24465028f207SShiyang He rc = rte_eth_tx_queue_info_get(port_id, 24475028f207SShiyang He queue_id, &tx_qinfo); 24485028f207SShiyang He if (rc == 0) { 24495028f207SShiyang He ports[port_id].txq[queue_id].state = 24505028f207SShiyang He tx_qinfo.queue_state; 24515028f207SShiyang He } else if (rc == -ENOTSUP) { 24525028f207SShiyang He /* 24535028f207SShiyang He * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED 24545028f207SShiyang He * to ensure that the PMDs do not implement 24555028f207SShiyang He * rte_eth_tx_queue_info_get can forward. 24565028f207SShiyang He */ 24575028f207SShiyang He ports[port_id].txq[queue_id].state = 24585028f207SShiyang He RTE_ETH_QUEUE_STATE_STARTED; 24595028f207SShiyang He } else { 24605028f207SShiyang He TESTPMD_LOG(WARNING, 24615028f207SShiyang He "Failed to get tx queue info\n"); 24625028f207SShiyang He } 24635028f207SShiyang He } 24645028f207SShiyang He 24655028f207SShiyang He static void 24665028f207SShiyang He update_queue_state(void) 24675028f207SShiyang He { 24685028f207SShiyang He portid_t pi; 24695028f207SShiyang He queueid_t qi; 24705028f207SShiyang He 24715028f207SShiyang He RTE_ETH_FOREACH_DEV(pi) { 24725028f207SShiyang He for (qi = 0; qi < nb_rxq; qi++) 24735028f207SShiyang He update_rx_queue_state(pi, qi); 24745028f207SShiyang He for (qi = 0; qi < nb_txq; qi++) 24755028f207SShiyang He update_tx_queue_state(pi, qi); 24765028f207SShiyang He } 24775028f207SShiyang He } 24785028f207SShiyang He 2479af75078fSIntel /* 2480af75078fSIntel * Launch packet forwarding configuration. 2481af75078fSIntel */ 2482af75078fSIntel void 2483af75078fSIntel start_packet_forwarding(int with_tx_first) 2484af75078fSIntel { 2485af75078fSIntel port_fwd_begin_t port_fwd_begin; 2486af75078fSIntel port_fwd_end_t port_fwd_end; 24873c4426dbSDmitry Kozlyuk stream_init_t stream_init = cur_fwd_eng->stream_init; 2488af75078fSIntel unsigned int i; 2489af75078fSIntel 24905a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 24915a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 24925a8fb55cSReshma Pattan 24935a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 24945a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 24955a8fb55cSReshma Pattan 24965a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 24975a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 24985a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 24995a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 25005a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 25015a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 25025a8fb55cSReshma Pattan 2503ce8d5614SIntel if (all_ports_started() == 0) { 250461a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2505ce8d5614SIntel return; 2506ce8d5614SIntel } 2507af75078fSIntel if (test_done == 0) { 250861a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2509af75078fSIntel return; 2510af75078fSIntel } 25117741e4cfSIntel 251247a767b2SMatan Azrad fwd_config_setup(); 251347a767b2SMatan Azrad 251465744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 251565744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 251665744833SXueming Li return; 251765744833SXueming Li 25185028f207SShiyang He if (stream_init != NULL) { 2519*d7d802daSFerruh Yigit if (rte_eal_process_type() == RTE_PROC_SECONDARY) 25205028f207SShiyang He update_queue_state(); 25213c4426dbSDmitry Kozlyuk for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) 25223c4426dbSDmitry Kozlyuk stream_init(fwd_streams[i]); 25235028f207SShiyang He } 25243c4426dbSDmitry Kozlyuk 2525a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2526a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2527a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2528a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2529a78040c9SAlvin Zhang fprintf(stderr, 2530a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2531a78040c9SAlvin Zhang return; 2532a78040c9SAlvin Zhang } 2533a78040c9SAlvin Zhang } 2534a78040c9SAlvin Zhang } 2535a78040c9SAlvin Zhang 2536a78040c9SAlvin Zhang if (with_tx_first) { 2537a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2538a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2539a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2540a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2541a78040c9SAlvin Zhang fprintf(stderr, 2542a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2543a78040c9SAlvin Zhang return; 2544a78040c9SAlvin Zhang } 2545a78040c9SAlvin Zhang } 2546a78040c9SAlvin Zhang } 2547a78040c9SAlvin Zhang } 2548a78040c9SAlvin Zhang 2549a78040c9SAlvin Zhang test_done = 0; 2550a78040c9SAlvin Zhang 25517741e4cfSIntel if(!no_flush_rx) 25527741e4cfSIntel flush_fwd_rx_queues(); 25537741e4cfSIntel 2554af75078fSIntel rxtx_config_display(); 2555af75078fSIntel 255653324971SDavid Marchand fwd_stats_reset(); 2557af75078fSIntel if (with_tx_first) { 2558acbf77a6SZhihong Wang while (with_tx_first--) { 2559acbf77a6SZhihong Wang launch_packet_forwarding( 2560acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2561af75078fSIntel rte_eal_mp_wait_lcore(); 2562acbf77a6SZhihong Wang } 2563af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2564af75078fSIntel if (port_fwd_end != NULL) { 2565af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2566af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2567af75078fSIntel } 2568af75078fSIntel } 2569af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2570af75078fSIntel } 2571af75078fSIntel 2572af75078fSIntel void 2573af75078fSIntel stop_packet_forwarding(void) 2574af75078fSIntel { 2575af75078fSIntel port_fwd_end_t port_fwd_end; 2576af75078fSIntel lcoreid_t lc_id; 257753324971SDavid Marchand portid_t pt_id; 257853324971SDavid Marchand int i; 2579af75078fSIntel 2580af75078fSIntel if (test_done) { 258161a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2582af75078fSIntel return; 2583af75078fSIntel } 2584af75078fSIntel printf("Telling cores to stop..."); 2585af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2586af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2587af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2588af75078fSIntel rte_eal_mp_wait_lcore(); 2589af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2590af75078fSIntel if (port_fwd_end != NULL) { 2591af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2592af75078fSIntel pt_id = fwd_ports_ids[i]; 2593af75078fSIntel (*port_fwd_end)(pt_id); 2594af75078fSIntel } 2595af75078fSIntel } 2596c185d42cSDavid Marchand 259753324971SDavid Marchand fwd_stats_display(); 259858d475b7SJerin Jacob 2599af75078fSIntel printf("\nDone.\n"); 2600af75078fSIntel test_done = 1; 2601af75078fSIntel } 2602af75078fSIntel 2603cfae07fdSOuyang Changchun void 2604cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 2605cfae07fdSOuyang Changchun { 2606492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 260761a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link up fail.\n"); 2608cfae07fdSOuyang Changchun } 2609cfae07fdSOuyang Changchun 2610cfae07fdSOuyang Changchun void 2611cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 2612cfae07fdSOuyang Changchun { 2613492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 261461a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link down fail.\n"); 2615cfae07fdSOuyang Changchun } 2616cfae07fdSOuyang Changchun 2617ce8d5614SIntel static int 2618ce8d5614SIntel all_ports_started(void) 2619ce8d5614SIntel { 2620ce8d5614SIntel portid_t pi; 2621ce8d5614SIntel struct rte_port *port; 2622ce8d5614SIntel 26237d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2624ce8d5614SIntel port = &ports[pi]; 2625ce8d5614SIntel /* Check if there is a port which is not started */ 262641b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 262741b05095SBernard Iremonger (port->slave_flag == 0)) 2628ce8d5614SIntel return 0; 2629ce8d5614SIntel } 2630ce8d5614SIntel 2631ce8d5614SIntel /* No port is not started */ 2632ce8d5614SIntel return 1; 2633ce8d5614SIntel } 2634ce8d5614SIntel 2635148f963fSBruce Richardson int 26366018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 26376018eb8cSShahaf Shuler { 26386018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 26396018eb8cSShahaf Shuler 26406018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 26416018eb8cSShahaf Shuler (port->slave_flag == 0)) 26426018eb8cSShahaf Shuler return 0; 26436018eb8cSShahaf Shuler return 1; 26446018eb8cSShahaf Shuler } 26456018eb8cSShahaf Shuler 26466018eb8cSShahaf Shuler int 2647edab33b1STetsuya Mukawa all_ports_stopped(void) 2648edab33b1STetsuya Mukawa { 2649edab33b1STetsuya Mukawa portid_t pi; 2650edab33b1STetsuya Mukawa 26517d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 26526018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 2653edab33b1STetsuya Mukawa return 0; 2654edab33b1STetsuya Mukawa } 2655edab33b1STetsuya Mukawa 2656edab33b1STetsuya Mukawa return 1; 2657edab33b1STetsuya Mukawa } 2658edab33b1STetsuya Mukawa 2659edab33b1STetsuya Mukawa int 2660edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2661edab33b1STetsuya Mukawa { 2662edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2663edab33b1STetsuya Mukawa return 0; 2664edab33b1STetsuya Mukawa 2665edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2666edab33b1STetsuya Mukawa return 0; 2667edab33b1STetsuya Mukawa 2668edab33b1STetsuya Mukawa return 1; 2669edab33b1STetsuya Mukawa } 2670edab33b1STetsuya Mukawa 267123095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8) 267223095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9) 267323095155SDariusz Sosnowski 267423095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12) 267523095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13) 267623095155SDariusz Sosnowski 267723095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16) 267823095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17) 267923095155SDariusz Sosnowski 268023095155SDariusz Sosnowski 26811c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */ 26821c69df45SOri Kam static int 268301817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 26841c69df45SOri Kam { 26851c69df45SOri Kam queueid_t qi; 26861c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 26871c69df45SOri Kam .peer_count = 1, 26881c69df45SOri Kam }; 26891c69df45SOri Kam int i; 26901c69df45SOri Kam int diag; 26911c69df45SOri Kam struct rte_port *port = &ports[pi]; 269201817b10SBing Zhao uint16_t peer_rx_port = pi; 269301817b10SBing Zhao uint16_t peer_tx_port = pi; 269401817b10SBing Zhao uint32_t manual = 1; 269501817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 269623095155SDariusz Sosnowski uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY; 269723095155SDariusz Sosnowski uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY; 269823095155SDariusz Sosnowski uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY; 269923095155SDariusz Sosnowski uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY; 270023095155SDariusz Sosnowski uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY; 270123095155SDariusz Sosnowski uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY; 270201817b10SBing Zhao 270301817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 270401817b10SBing Zhao peer_rx_port = pi; 270501817b10SBing Zhao peer_tx_port = pi; 270601817b10SBing Zhao manual = 0; 270701817b10SBing Zhao } else if (hairpin_mode & 0x1) { 270801817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 270901817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 271001817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 271101817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 271201817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 271301817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 271401817b10SBing Zhao peer_rx_port = p_pi; 271501817b10SBing Zhao } else { 271601817b10SBing Zhao uint16_t next_pi; 271701817b10SBing Zhao 271801817b10SBing Zhao /* Last port will be the peer RX port of the first. */ 271901817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi) 272001817b10SBing Zhao peer_rx_port = next_pi; 272101817b10SBing Zhao } 272201817b10SBing Zhao manual = 1; 272301817b10SBing Zhao } else if (hairpin_mode & 0x2) { 272401817b10SBing Zhao if (cnt_pi & 0x1) { 272501817b10SBing Zhao peer_rx_port = p_pi; 272601817b10SBing Zhao } else { 272701817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1, 272801817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 272901817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS) 273001817b10SBing Zhao peer_rx_port = pi; 273101817b10SBing Zhao } 273201817b10SBing Zhao peer_tx_port = peer_rx_port; 273301817b10SBing Zhao manual = 1; 273401817b10SBing Zhao } 27351c69df45SOri Kam 27361c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { 273701817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port; 27381c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq; 273901817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 274001817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 274123095155SDariusz Sosnowski hairpin_conf.force_memory = !!tx_force_memory; 274223095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!tx_locked_memory; 274323095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!tx_rte_memory; 27441c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup 27451c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf); 27461c69df45SOri Kam i++; 27471c69df45SOri Kam if (diag == 0) 27481c69df45SOri Kam continue; 27491c69df45SOri Kam 27501c69df45SOri Kam /* Fail to setup rx queue, return */ 2751eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2752eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2753eac341d3SJoyce Kong else 275461a3b0e5SAndrew Rybchenko fprintf(stderr, 275561a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 275661a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 275761a3b0e5SAndrew Rybchenko pi); 27581c69df45SOri Kam /* try to reconfigure queues next time */ 27591c69df45SOri Kam port->need_reconfig_queues = 1; 27601c69df45SOri Kam return -1; 27611c69df45SOri Kam } 27621c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { 276301817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port; 27641c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq; 276501817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 276601817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 276723095155SDariusz Sosnowski hairpin_conf.force_memory = !!rx_force_memory; 276823095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!rx_locked_memory; 276923095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!rx_rte_memory; 27701c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup 27711c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf); 27721c69df45SOri Kam i++; 27731c69df45SOri Kam if (diag == 0) 27741c69df45SOri Kam continue; 27751c69df45SOri Kam 27761c69df45SOri Kam /* Fail to setup rx queue, return */ 2777eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2778eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2779eac341d3SJoyce Kong else 278061a3b0e5SAndrew Rybchenko fprintf(stderr, 278161a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 278261a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 278361a3b0e5SAndrew Rybchenko pi); 27841c69df45SOri Kam /* try to reconfigure queues next time */ 27851c69df45SOri Kam port->need_reconfig_queues = 1; 27861c69df45SOri Kam return -1; 27871c69df45SOri Kam } 27881c69df45SOri Kam return 0; 27891c69df45SOri Kam } 27901c69df45SOri Kam 27912befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 27922befc67fSViacheslav Ovsiienko int 27932befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 27942befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 27952befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 27962befc67fSViacheslav Ovsiienko { 27972befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 27984f04edcdSHanumanth Pothula struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; 27994f04edcdSHanumanth Pothula struct rte_mempool *mpx; 28002befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 280154a0f4d7SYuan Wang uint32_t prev_hdrs = 0; 28022befc67fSViacheslav Ovsiienko int ret; 28032befc67fSViacheslav Ovsiienko 28044f04edcdSHanumanth Pothula 2805a4bf5421SHanumanth Pothula if ((rx_pkt_nb_segs > 1) && 2806a4bf5421SHanumanth Pothula (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 28074f04edcdSHanumanth Pothula /* multi-segment configuration */ 28082befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 28092befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 28102befc67fSViacheslav Ovsiienko /* 28112befc67fSViacheslav Ovsiienko * Use last valid pool for the segments with number 28122befc67fSViacheslav Ovsiienko * exceeding the pool index. 28132befc67fSViacheslav Ovsiienko */ 28141108c33eSRaja Zidane mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 28152befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 28162befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 28172befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 28182befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 28192befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ? mpx : mp; 282052e2e7edSYuan Wang if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { 282154a0f4d7SYuan Wang rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs; 282254a0f4d7SYuan Wang prev_hdrs |= rx_seg->proto_hdr; 282352e2e7edSYuan Wang } else { 282452e2e7edSYuan Wang rx_seg->length = rx_pkt_seg_lengths[i] ? 282552e2e7edSYuan Wang rx_pkt_seg_lengths[i] : 282652e2e7edSYuan Wang mbuf_data_size[mp_n]; 282752e2e7edSYuan Wang } 28282befc67fSViacheslav Ovsiienko } 28292befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 28302befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 2831a4bf5421SHanumanth Pothula rx_conf->rx_mempools = NULL; 2832a4bf5421SHanumanth Pothula rx_conf->rx_nmempool = 0; 2833a4bf5421SHanumanth Pothula ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2834a4bf5421SHanumanth Pothula socket_id, rx_conf, NULL); 2835a4bf5421SHanumanth Pothula rx_conf->rx_seg = NULL; 2836a4bf5421SHanumanth Pothula rx_conf->rx_nseg = 0; 2837a4bf5421SHanumanth Pothula } else if (multi_rx_mempool == 1) { 28384f04edcdSHanumanth Pothula /* multi-pool configuration */ 2839a4bf5421SHanumanth Pothula struct rte_eth_dev_info dev_info; 2840a4bf5421SHanumanth Pothula 2841a4bf5421SHanumanth Pothula if (mbuf_data_size_n <= 1) { 2842a4bf5421SHanumanth Pothula fprintf(stderr, "Invalid number of mempools %u\n", 2843a4bf5421SHanumanth Pothula mbuf_data_size_n); 2844a4bf5421SHanumanth Pothula return -EINVAL; 2845a4bf5421SHanumanth Pothula } 2846a4bf5421SHanumanth Pothula ret = rte_eth_dev_info_get(port_id, &dev_info); 2847a4bf5421SHanumanth Pothula if (ret != 0) 2848a4bf5421SHanumanth Pothula return ret; 2849a4bf5421SHanumanth Pothula if (dev_info.max_rx_mempools == 0) { 2850a4bf5421SHanumanth Pothula fprintf(stderr, 2851a4bf5421SHanumanth Pothula "Port %u doesn't support requested multi-rx-mempool configuration.\n", 2852a4bf5421SHanumanth Pothula port_id); 2853a4bf5421SHanumanth Pothula return -ENOTSUP; 2854a4bf5421SHanumanth Pothula } 28554f04edcdSHanumanth Pothula for (i = 0; i < mbuf_data_size_n; i++) { 28564f04edcdSHanumanth Pothula mpx = mbuf_pool_find(socket_id, i); 28574f04edcdSHanumanth Pothula rx_mempool[i] = mpx ? mpx : mp; 28584f04edcdSHanumanth Pothula } 28594f04edcdSHanumanth Pothula rx_conf->rx_mempools = rx_mempool; 28604f04edcdSHanumanth Pothula rx_conf->rx_nmempool = mbuf_data_size_n; 2861a4bf5421SHanumanth Pothula rx_conf->rx_seg = NULL; 2862a4bf5421SHanumanth Pothula rx_conf->rx_nseg = 0; 28632befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 28642befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 2865a4bf5421SHanumanth Pothula rx_conf->rx_mempools = NULL; 2866a4bf5421SHanumanth Pothula rx_conf->rx_nmempool = 0; 2867a4bf5421SHanumanth Pothula } else { 2868a4bf5421SHanumanth Pothula /* Single pool/segment configuration */ 28692befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 28702befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 28714f04edcdSHanumanth Pothula rx_conf->rx_mempools = NULL; 28724f04edcdSHanumanth Pothula rx_conf->rx_nmempool = 0; 2873a4bf5421SHanumanth Pothula ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2874a4bf5421SHanumanth Pothula socket_id, rx_conf, mp); 2875a4bf5421SHanumanth Pothula } 2876a4bf5421SHanumanth Pothula 28773c4426dbSDmitry Kozlyuk ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ? 28783c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 28793c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 28802befc67fSViacheslav Ovsiienko return ret; 28812befc67fSViacheslav Ovsiienko } 28822befc67fSViacheslav Ovsiienko 288363b72657SIvan Ilchenko static int 288463b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 288563b72657SIvan Ilchenko { 288663b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 288763b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 288863b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 288963b72657SIvan Ilchenko 289063b72657SIvan Ilchenko if (xstats_display_num == 0) 289163b72657SIvan Ilchenko return 0; 289263b72657SIvan Ilchenko 289363b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 289463b72657SIvan Ilchenko if (*ids_supp == NULL) 289563b72657SIvan Ilchenko goto fail_ids_supp; 289663b72657SIvan Ilchenko 289763b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 289863b72657SIvan Ilchenko sizeof(**prev_values)); 289963b72657SIvan Ilchenko if (*prev_values == NULL) 290063b72657SIvan Ilchenko goto fail_prev_values; 290163b72657SIvan Ilchenko 290263b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 290363b72657SIvan Ilchenko sizeof(**curr_values)); 290463b72657SIvan Ilchenko if (*curr_values == NULL) 290563b72657SIvan Ilchenko goto fail_curr_values; 290663b72657SIvan Ilchenko 290763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 290863b72657SIvan Ilchenko 290963b72657SIvan Ilchenko return 0; 291063b72657SIvan Ilchenko 291163b72657SIvan Ilchenko fail_curr_values: 291263b72657SIvan Ilchenko free(*prev_values); 291363b72657SIvan Ilchenko fail_prev_values: 291463b72657SIvan Ilchenko free(*ids_supp); 291563b72657SIvan Ilchenko fail_ids_supp: 291663b72657SIvan Ilchenko return -ENOMEM; 291763b72657SIvan Ilchenko } 291863b72657SIvan Ilchenko 291963b72657SIvan Ilchenko static void 292063b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 292163b72657SIvan Ilchenko { 292263b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 292363b72657SIvan Ilchenko return; 292463b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 292563b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 292663b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 292763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 292863b72657SIvan Ilchenko } 292963b72657SIvan Ilchenko 293063b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */ 293163b72657SIvan Ilchenko static void 293263b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi) 293363b72657SIvan Ilchenko { 293463b72657SIvan Ilchenko unsigned int stat, stat_supp; 293563b72657SIvan Ilchenko const char *xstat_name; 293663b72657SIvan Ilchenko struct rte_port *port; 293763b72657SIvan Ilchenko uint64_t *ids_supp; 293863b72657SIvan Ilchenko int rc; 293963b72657SIvan Ilchenko 294063b72657SIvan Ilchenko if (xstats_display_num == 0) 294163b72657SIvan Ilchenko return; 294263b72657SIvan Ilchenko 294363b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) { 294463b72657SIvan Ilchenko fill_xstats_display_info(); 294563b72657SIvan Ilchenko return; 294663b72657SIvan Ilchenko } 294763b72657SIvan Ilchenko 294863b72657SIvan Ilchenko port = &ports[pi]; 294963b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED) 295063b72657SIvan Ilchenko return; 295163b72657SIvan Ilchenko 295263b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) 295363b72657SIvan Ilchenko rte_exit(EXIT_FAILURE, 295463b72657SIvan Ilchenko "Failed to allocate xstats display memory\n"); 295563b72657SIvan Ilchenko 295663b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp; 295763b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { 295863b72657SIvan Ilchenko xstat_name = xstats_display[stat].name; 295963b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, 296063b72657SIvan Ilchenko ids_supp + stat_supp); 296163b72657SIvan Ilchenko if (rc != 0) { 296263b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", 296363b72657SIvan Ilchenko xstat_name, pi, stat); 296463b72657SIvan Ilchenko continue; 296563b72657SIvan Ilchenko } 296663b72657SIvan Ilchenko stat_supp++; 296763b72657SIvan Ilchenko } 296863b72657SIvan Ilchenko 296963b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp; 297063b72657SIvan Ilchenko } 297163b72657SIvan Ilchenko 297263b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */ 297363b72657SIvan Ilchenko static void 297463b72657SIvan Ilchenko fill_xstats_display_info(void) 297563b72657SIvan Ilchenko { 297663b72657SIvan Ilchenko portid_t pi; 297763b72657SIvan Ilchenko 297863b72657SIvan Ilchenko if (xstats_display_num == 0) 297963b72657SIvan Ilchenko return; 298063b72657SIvan Ilchenko 298163b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 298263b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 298363b72657SIvan Ilchenko } 298463b72657SIvan Ilchenko 29857c06f1abSHuisong Li /* 29867c06f1abSHuisong Li * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding 29877c06f1abSHuisong Li * device in dev_info is zero when no slave is added. And its capability 29887c06f1abSHuisong Li * will be updated when add a new slave device. So adding a slave device need 29897c06f1abSHuisong Li * to update the port configurations of bonding device. 29907c06f1abSHuisong Li */ 29917c06f1abSHuisong Li static void 29927c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid) 29937c06f1abSHuisong Li { 29947c06f1abSHuisong Li #ifdef RTE_NET_BOND 29957c06f1abSHuisong Li struct rte_port *port = &ports[bond_pid]; 29967c06f1abSHuisong Li uint16_t i; 29977c06f1abSHuisong Li int ret; 29987c06f1abSHuisong Li 29997c06f1abSHuisong Li ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info); 30007c06f1abSHuisong Li if (ret != 0) { 30017c06f1abSHuisong Li fprintf(stderr, "Failed to get dev info for port = %u\n", 30027c06f1abSHuisong Li bond_pid); 30037c06f1abSHuisong Li return; 30047c06f1abSHuisong Li } 30057c06f1abSHuisong Li 30067c06f1abSHuisong Li if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 30077c06f1abSHuisong Li port->dev_conf.txmode.offloads |= 30087c06f1abSHuisong Li RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 30097c06f1abSHuisong Li /* Apply Tx offloads configuration */ 30107c06f1abSHuisong Li for (i = 0; i < port->dev_info.max_tx_queues; i++) 30117c06f1abSHuisong Li port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 30127c06f1abSHuisong Li 30137c06f1abSHuisong Li port->dev_conf.rx_adv_conf.rss_conf.rss_hf &= 30147c06f1abSHuisong Li port->dev_info.flow_type_rss_offloads; 30157c06f1abSHuisong Li #else 30167c06f1abSHuisong Li RTE_SET_USED(bond_pid); 30177c06f1abSHuisong Li #endif 30187c06f1abSHuisong Li } 30197c06f1abSHuisong Li 3020edab33b1STetsuya Mukawa int 3021ce8d5614SIntel start_port(portid_t pid) 3022ce8d5614SIntel { 3023cdede073SFerruh Yigit int diag; 3024ce8d5614SIntel portid_t pi; 302501817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 302601817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 302701817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 302801817b10SBing Zhao uint16_t cnt_pi = 0; 302901817b10SBing Zhao uint16_t cfg_pi = 0; 303001817b10SBing Zhao int peer_pi; 3031ce8d5614SIntel queueid_t qi; 3032ce8d5614SIntel struct rte_port *port; 30331c69df45SOri Kam struct rte_eth_hairpin_cap cap; 3034cdede073SFerruh Yigit bool at_least_one_port_exist = false; 3035cdede073SFerruh Yigit bool all_ports_already_started = true; 3036cdede073SFerruh Yigit bool at_least_one_port_successfully_started = false; 3037ce8d5614SIntel 30384468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 30394468635fSMichael Qiu return 0; 30404468635fSMichael Qiu 30417d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 3042edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3043ce8d5614SIntel continue; 3044ce8d5614SIntel 3045d8c079a5SMin Hu (Connor) if (port_is_bonding_slave(pi)) { 3046d8c079a5SMin Hu (Connor) fprintf(stderr, 3047d8c079a5SMin Hu (Connor) "Please remove port %d from bonded device.\n", 3048d8c079a5SMin Hu (Connor) pi); 3049d8c079a5SMin Hu (Connor) continue; 3050d8c079a5SMin Hu (Connor) } 3051d8c079a5SMin Hu (Connor) 3052cdede073SFerruh Yigit at_least_one_port_exist = true; 3053cdede073SFerruh Yigit 3054ce8d5614SIntel port = &ports[pi]; 3055cdede073SFerruh Yigit if (port->port_status == RTE_PORT_STOPPED) { 3056eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3057cdede073SFerruh Yigit all_ports_already_started = false; 3058cdede073SFerruh Yigit } else { 305961a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 3060ce8d5614SIntel continue; 3061ce8d5614SIntel } 3062ce8d5614SIntel 3063ce8d5614SIntel if (port->need_reconfig > 0) { 3064655eae01SJie Wang struct rte_eth_conf dev_conf; 3065655eae01SJie Wang int k; 3066655eae01SJie Wang 3067ce8d5614SIntel port->need_reconfig = 0; 3068ce8d5614SIntel 30697ee3e944SVasily Philipov if (flow_isolate_all) { 30707ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 30717ee3e944SVasily Philipov if (ret) { 307261a3b0e5SAndrew Rybchenko fprintf(stderr, 307361a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 307461a3b0e5SAndrew Rybchenko pi); 30757ee3e944SVasily Philipov return -1; 30767ee3e944SVasily Philipov } 30777ee3e944SVasily Philipov } 3078b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 30795706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 308020a0286fSLiu Xiaofeng port->socket_id); 30811c69df45SOri Kam if (nb_hairpinq > 0 && 30821c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 308361a3b0e5SAndrew Rybchenko fprintf(stderr, 308461a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 308561a3b0e5SAndrew Rybchenko pi); 30861c69df45SOri Kam return -1; 30871c69df45SOri Kam } 30881bb4a528SFerruh Yigit 30897c06f1abSHuisong Li if (port->bond_flag == 1 && port->update_conf == 1) { 30907c06f1abSHuisong Li update_bonding_port_dev_conf(pi); 30917c06f1abSHuisong Li port->update_conf = 0; 30927c06f1abSHuisong Li } 30937c06f1abSHuisong Li 3094ce8d5614SIntel /* configure port */ 3095a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 30961c69df45SOri Kam nb_txq + nb_hairpinq, 3097ce8d5614SIntel &(port->dev_conf)); 3098ce8d5614SIntel if (diag != 0) { 3099eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3100eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3101eac341d3SJoyce Kong else 310261a3b0e5SAndrew Rybchenko fprintf(stderr, 310361a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 310461a3b0e5SAndrew Rybchenko pi); 310561a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 310661a3b0e5SAndrew Rybchenko pi); 3107ce8d5614SIntel /* try to reconfigure port next time */ 3108ce8d5614SIntel port->need_reconfig = 1; 3109148f963fSBruce Richardson return -1; 3110ce8d5614SIntel } 3111655eae01SJie Wang /* get device configuration*/ 3112655eae01SJie Wang if (0 != 3113655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 3114655eae01SJie Wang fprintf(stderr, 3115655eae01SJie Wang "port %d can not get device configuration\n", 3116655eae01SJie Wang pi); 3117655eae01SJie Wang return -1; 3118655eae01SJie Wang } 3119655eae01SJie Wang /* Apply Rx offloads configuration */ 3120655eae01SJie Wang if (dev_conf.rxmode.offloads != 3121655eae01SJie Wang port->dev_conf.rxmode.offloads) { 3122655eae01SJie Wang port->dev_conf.rxmode.offloads |= 3123655eae01SJie Wang dev_conf.rxmode.offloads; 3124655eae01SJie Wang for (k = 0; 3125655eae01SJie Wang k < port->dev_info.max_rx_queues; 3126655eae01SJie Wang k++) 31273c4426dbSDmitry Kozlyuk port->rxq[k].conf.offloads |= 3128655eae01SJie Wang dev_conf.rxmode.offloads; 3129655eae01SJie Wang } 3130655eae01SJie Wang /* Apply Tx offloads configuration */ 3131655eae01SJie Wang if (dev_conf.txmode.offloads != 3132655eae01SJie Wang port->dev_conf.txmode.offloads) { 3133655eae01SJie Wang port->dev_conf.txmode.offloads |= 3134655eae01SJie Wang dev_conf.txmode.offloads; 3135655eae01SJie Wang for (k = 0; 3136655eae01SJie Wang k < port->dev_info.max_tx_queues; 3137655eae01SJie Wang k++) 31383c4426dbSDmitry Kozlyuk port->txq[k].conf.offloads |= 3139655eae01SJie Wang dev_conf.txmode.offloads; 3140655eae01SJie Wang } 3141ce8d5614SIntel } 3142a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 3143ce8d5614SIntel port->need_reconfig_queues = 0; 3144ce8d5614SIntel /* setup tx queues */ 3145ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 31463c4426dbSDmitry Kozlyuk struct rte_eth_txconf *conf = 31473c4426dbSDmitry Kozlyuk &port->txq[qi].conf; 31483c4426dbSDmitry Kozlyuk 3149b6ea6408SIntel if ((numa_support) && 3150b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 3151b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 3152d44f8a48SQi Zhang port->nb_tx_desc[qi], 3153d44f8a48SQi Zhang txring_numa[pi], 31543c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 3155b6ea6408SIntel else 3156b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 3157d44f8a48SQi Zhang port->nb_tx_desc[qi], 3158d44f8a48SQi Zhang port->socket_id, 31593c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 3160b6ea6408SIntel 31613c4426dbSDmitry Kozlyuk if (diag == 0) { 31623c4426dbSDmitry Kozlyuk port->txq[qi].state = 31633c4426dbSDmitry Kozlyuk conf->tx_deferred_start ? 31643c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 31653c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 3166ce8d5614SIntel continue; 31673c4426dbSDmitry Kozlyuk } 3168ce8d5614SIntel 3169ce8d5614SIntel /* Fail to setup tx queue, return */ 3170eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3171eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3172eac341d3SJoyce Kong else 317361a3b0e5SAndrew Rybchenko fprintf(stderr, 317461a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 317561a3b0e5SAndrew Rybchenko pi); 317661a3b0e5SAndrew Rybchenko fprintf(stderr, 317761a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 3178d44f8a48SQi Zhang pi); 3179ce8d5614SIntel /* try to reconfigure queues next time */ 3180ce8d5614SIntel port->need_reconfig_queues = 1; 3181148f963fSBruce Richardson return -1; 3182ce8d5614SIntel } 3183ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 3184d44f8a48SQi Zhang /* setup rx queues */ 3185b6ea6408SIntel if ((numa_support) && 3186b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 3187b6ea6408SIntel struct rte_mempool * mp = 318826cbb419SViacheslav Ovsiienko mbuf_pool_find 318926cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 3190b6ea6408SIntel if (mp == NULL) { 319161a3b0e5SAndrew Rybchenko fprintf(stderr, 319261a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 3193b6ea6408SIntel rxring_numa[pi]); 3194148f963fSBruce Richardson return -1; 3195b6ea6408SIntel } 3196b6ea6408SIntel 31972befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 3198d4930794SFerruh Yigit port->nb_rx_desc[qi], 3199d44f8a48SQi Zhang rxring_numa[pi], 32003c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 3201d44f8a48SQi Zhang mp); 32021e1d6bddSBernard Iremonger } else { 32031e1d6bddSBernard Iremonger struct rte_mempool *mp = 320426cbb419SViacheslav Ovsiienko mbuf_pool_find 320526cbb419SViacheslav Ovsiienko (port->socket_id, 0); 32061e1d6bddSBernard Iremonger if (mp == NULL) { 320761a3b0e5SAndrew Rybchenko fprintf(stderr, 320861a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 32091e1d6bddSBernard Iremonger port->socket_id); 32101e1d6bddSBernard Iremonger return -1; 3211b6ea6408SIntel } 32122befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 3213d4930794SFerruh Yigit port->nb_rx_desc[qi], 3214d44f8a48SQi Zhang port->socket_id, 32153c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 3216d44f8a48SQi Zhang mp); 32171e1d6bddSBernard Iremonger } 3218ce8d5614SIntel if (diag == 0) 3219ce8d5614SIntel continue; 3220ce8d5614SIntel 3221ce8d5614SIntel /* Fail to setup rx queue, return */ 3222eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3223eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3224eac341d3SJoyce Kong else 322561a3b0e5SAndrew Rybchenko fprintf(stderr, 322661a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 322761a3b0e5SAndrew Rybchenko pi); 322861a3b0e5SAndrew Rybchenko fprintf(stderr, 322961a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 3230d44f8a48SQi Zhang pi); 3231ce8d5614SIntel /* try to reconfigure queues next time */ 3232ce8d5614SIntel port->need_reconfig_queues = 1; 3233148f963fSBruce Richardson return -1; 3234ce8d5614SIntel } 32351c69df45SOri Kam /* setup hairpin queues */ 323601817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 32371c69df45SOri Kam return -1; 3238ce8d5614SIntel } 3239b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 3240b0a9354aSPavan Nikhilesh if (clear_ptypes) { 3241b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 3242b0a9354aSPavan Nikhilesh NULL, 0); 3243b0a9354aSPavan Nikhilesh if (diag < 0) 324461a3b0e5SAndrew Rybchenko fprintf(stderr, 3245b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 3246b0a9354aSPavan Nikhilesh pi); 3247b0a9354aSPavan Nikhilesh } 3248b0a9354aSPavan Nikhilesh 324901817b10SBing Zhao p_pi = pi; 325001817b10SBing Zhao cnt_pi++; 325101817b10SBing Zhao 3252ce8d5614SIntel /* start port */ 3253a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi); 325452f2c6f2SAndrew Rybchenko if (diag < 0) { 325561a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to start port %d: %s\n", 325661a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag)); 3257ce8d5614SIntel 3258ce8d5614SIntel /* Fail to setup rx queue, return */ 3259eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3260eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3261eac341d3SJoyce Kong else 326261a3b0e5SAndrew Rybchenko fprintf(stderr, 326361a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 326461a3b0e5SAndrew Rybchenko pi); 3265ce8d5614SIntel continue; 3266ce8d5614SIntel } 3267ce8d5614SIntel 3268eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3269eac341d3SJoyce Kong port->port_status = RTE_PORT_STARTED; 3270eac341d3SJoyce Kong else 327161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into started\n", 327261a3b0e5SAndrew Rybchenko pi); 3273ce8d5614SIntel 32745ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0) 3275c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, 3276a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr)); 3277d8c89163SZijie Pan 3278cdede073SFerruh Yigit at_least_one_port_successfully_started = true; 327901817b10SBing Zhao 328001817b10SBing Zhao pl[cfg_pi++] = pi; 3281ce8d5614SIntel } 3282ce8d5614SIntel 3283*d7d802daSFerruh Yigit if (rte_eal_process_type() == RTE_PROC_SECONDARY) 32845028f207SShiyang He update_queue_state(); 32855028f207SShiyang He 3286cdede073SFerruh Yigit if (at_least_one_port_successfully_started && !no_link_check) 3287edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3288cdede073SFerruh Yigit else if (at_least_one_port_exist & all_ports_already_started) 328961a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n"); 3290ce8d5614SIntel 329101817b10SBing Zhao if (hairpin_mode & 0xf) { 329201817b10SBing Zhao uint16_t i; 329301817b10SBing Zhao int j; 329401817b10SBing Zhao 329501817b10SBing Zhao /* bind all started hairpin ports */ 329601817b10SBing Zhao for (i = 0; i < cfg_pi; i++) { 329701817b10SBing Zhao pi = pl[i]; 329801817b10SBing Zhao /* bind current Tx to all peer Rx */ 329901817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 330001817b10SBing Zhao RTE_MAX_ETHPORTS, 1); 330101817b10SBing Zhao if (peer_pi < 0) 330201817b10SBing Zhao return peer_pi; 330301817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 330401817b10SBing Zhao if (!port_is_started(peer_pl[j])) 330501817b10SBing Zhao continue; 330601817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 330701817b10SBing Zhao if (diag < 0) { 330861a3b0e5SAndrew Rybchenko fprintf(stderr, 330961a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 331001817b10SBing Zhao pi, peer_pl[j], 331101817b10SBing Zhao rte_strerror(-diag)); 331201817b10SBing Zhao return -1; 331301817b10SBing Zhao } 331401817b10SBing Zhao } 331501817b10SBing Zhao /* bind all peer Tx to current Rx */ 331601817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 331701817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 331801817b10SBing Zhao if (peer_pi < 0) 331901817b10SBing Zhao return peer_pi; 332001817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 332101817b10SBing Zhao if (!port_is_started(peer_pl[j])) 332201817b10SBing Zhao continue; 332301817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 332401817b10SBing Zhao if (diag < 0) { 332561a3b0e5SAndrew Rybchenko fprintf(stderr, 332661a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 332701817b10SBing Zhao peer_pl[j], pi, 332801817b10SBing Zhao rte_strerror(-diag)); 332901817b10SBing Zhao return -1; 333001817b10SBing Zhao } 333101817b10SBing Zhao } 333201817b10SBing Zhao } 333301817b10SBing Zhao } 333401817b10SBing Zhao 333563b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 333663b72657SIvan Ilchenko 3337ce8d5614SIntel printf("Done\n"); 3338148f963fSBruce Richardson return 0; 3339ce8d5614SIntel } 3340ce8d5614SIntel 3341ce8d5614SIntel void 3342ce8d5614SIntel stop_port(portid_t pid) 3343ce8d5614SIntel { 3344ce8d5614SIntel portid_t pi; 3345ce8d5614SIntel struct rte_port *port; 3346ce8d5614SIntel int need_check_link_status = 0; 334701817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 334801817b10SBing Zhao int peer_pi; 334947a4e1fbSDariusz Sosnowski int ret; 3350ce8d5614SIntel 33514468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 33524468635fSMichael Qiu return; 33534468635fSMichael Qiu 3354ce8d5614SIntel printf("Stopping ports...\n"); 3355ce8d5614SIntel 33567d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 33574468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3358ce8d5614SIntel continue; 3359ce8d5614SIntel 3360a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 336161a3b0e5SAndrew Rybchenko fprintf(stderr, 336261a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 336361a3b0e5SAndrew Rybchenko pi); 3364a8ef3e3aSBernard Iremonger continue; 3365a8ef3e3aSBernard Iremonger } 3366a8ef3e3aSBernard Iremonger 33670e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 336861a3b0e5SAndrew Rybchenko fprintf(stderr, 336961a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 337061a3b0e5SAndrew Rybchenko pi); 33710e545d30SBernard Iremonger continue; 33720e545d30SBernard Iremonger } 33730e545d30SBernard Iremonger 3374ce8d5614SIntel port = &ports[pi]; 3375eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3376eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3377eac341d3SJoyce Kong else 3378ce8d5614SIntel continue; 3379ce8d5614SIntel 338001817b10SBing Zhao if (hairpin_mode & 0xf) { 338101817b10SBing Zhao int j; 338201817b10SBing Zhao 338301817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 338401817b10SBing Zhao /* unbind all peer Tx from current Rx */ 338501817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 338601817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 338701817b10SBing Zhao if (peer_pi < 0) 338801817b10SBing Zhao continue; 338901817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 339001817b10SBing Zhao if (!port_is_started(peer_pl[j])) 339101817b10SBing Zhao continue; 339201817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 339301817b10SBing Zhao } 339401817b10SBing Zhao } 339501817b10SBing Zhao 3396543df472SChengwen Feng if (port->flow_list && !no_flow_flush) 33970f93edbfSGregory Etelson port_flow_flush(pi); 33980f93edbfSGregory Etelson 339947a4e1fbSDariusz Sosnowski ret = eth_dev_stop_mp(pi); 340047a4e1fbSDariusz Sosnowski if (ret != 0) { 3401e62c5a12SIvan Ilchenko RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3402e62c5a12SIvan Ilchenko pi); 340347a4e1fbSDariusz Sosnowski /* Allow to retry stopping the port. */ 340447a4e1fbSDariusz Sosnowski port->port_status = RTE_PORT_STARTED; 340547a4e1fbSDariusz Sosnowski continue; 340647a4e1fbSDariusz Sosnowski } 3407ce8d5614SIntel 3408eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3409eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3410eac341d3SJoyce Kong else 341161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 341261a3b0e5SAndrew Rybchenko pi); 3413ce8d5614SIntel need_check_link_status = 1; 3414ce8d5614SIntel } 3415bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3416edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3417ce8d5614SIntel 3418ce8d5614SIntel printf("Done\n"); 3419ce8d5614SIntel } 3420ce8d5614SIntel 3421ce6959bfSWisam Jaddo static void 34224f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3423ce6959bfSWisam Jaddo { 34244f1de450SThomas Monjalon portid_t i; 34254f1de450SThomas Monjalon portid_t new_total = 0; 3426ce6959bfSWisam Jaddo 34274f1de450SThomas Monjalon for (i = 0; i < *total; i++) 34284f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 34294f1de450SThomas Monjalon array[new_total] = array[i]; 34304f1de450SThomas Monjalon new_total++; 3431ce6959bfSWisam Jaddo } 34324f1de450SThomas Monjalon *total = new_total; 34334f1de450SThomas Monjalon } 34344f1de450SThomas Monjalon 34354f1de450SThomas Monjalon static void 34364f1de450SThomas Monjalon remove_invalid_ports(void) 34374f1de450SThomas Monjalon { 34384f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 34394f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 34404f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3441ce6959bfSWisam Jaddo } 3442ce6959bfSWisam Jaddo 34433889a322SHuisong Li static void 34444b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi) 34454b27989dSDmitry Kozlyuk { 34464b27989dSDmitry Kozlyuk mcast_addr_pool_destroy(pi); 34474b27989dSDmitry Kozlyuk port_flow_flush(pi); 34486d736e05SSuanming Mou port_flow_template_table_flush(pi); 34496d736e05SSuanming Mou port_flow_pattern_template_flush(pi); 34506d736e05SSuanming Mou port_flow_actions_template_flush(pi); 3451653c0812SRongwei Liu port_flex_item_flush(pi); 34524b27989dSDmitry Kozlyuk port_action_handle_flush(pi); 34534b27989dSDmitry Kozlyuk } 34544b27989dSDmitry Kozlyuk 34554b27989dSDmitry Kozlyuk static void 34563889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves) 34573889a322SHuisong Li { 34583889a322SHuisong Li struct rte_port *port; 34593889a322SHuisong Li portid_t slave_pid; 34603889a322SHuisong Li uint16_t i; 34613889a322SHuisong Li 34623889a322SHuisong Li for (i = 0; i < num_slaves; i++) { 34633889a322SHuisong Li slave_pid = slave_pids[i]; 34643889a322SHuisong Li if (port_is_started(slave_pid) == 1) { 34653889a322SHuisong Li if (rte_eth_dev_stop(slave_pid) != 0) 34663889a322SHuisong Li fprintf(stderr, "rte_eth_dev_stop failed for port %u\n", 34673889a322SHuisong Li slave_pid); 34683889a322SHuisong Li 34693889a322SHuisong Li port = &ports[slave_pid]; 34703889a322SHuisong Li port->port_status = RTE_PORT_STOPPED; 34713889a322SHuisong Li } 34723889a322SHuisong Li 34733889a322SHuisong Li clear_port_slave_flag(slave_pid); 34743889a322SHuisong Li 34753889a322SHuisong Li /* Close slave device when testpmd quit or is killed. */ 34763889a322SHuisong Li if (cl_quit == 1 || f_quit == 1) 34773889a322SHuisong Li rte_eth_dev_close(slave_pid); 34783889a322SHuisong Li } 34793889a322SHuisong Li } 34803889a322SHuisong Li 3481ce8d5614SIntel void 3482ce8d5614SIntel close_port(portid_t pid) 3483ce8d5614SIntel { 3484ce8d5614SIntel portid_t pi; 3485ce8d5614SIntel struct rte_port *port; 34863889a322SHuisong Li portid_t slave_pids[RTE_MAX_ETHPORTS]; 34873889a322SHuisong Li int num_slaves = 0; 3488ce8d5614SIntel 34894468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 34904468635fSMichael Qiu return; 34914468635fSMichael Qiu 3492ce8d5614SIntel printf("Closing ports...\n"); 3493ce8d5614SIntel 34947d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 34954468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3496ce8d5614SIntel continue; 3497ce8d5614SIntel 3498a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 349961a3b0e5SAndrew Rybchenko fprintf(stderr, 350061a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 350161a3b0e5SAndrew Rybchenko pi); 3502a8ef3e3aSBernard Iremonger continue; 3503a8ef3e3aSBernard Iremonger } 3504a8ef3e3aSBernard Iremonger 35050e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 350661a3b0e5SAndrew Rybchenko fprintf(stderr, 350761a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 350861a3b0e5SAndrew Rybchenko pi); 35090e545d30SBernard Iremonger continue; 35100e545d30SBernard Iremonger } 35110e545d30SBernard Iremonger 3512ce8d5614SIntel port = &ports[pi]; 3513eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 351461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3515d4e8ad64SMichael Qiu continue; 3516d4e8ad64SMichael Qiu } 3517d4e8ad64SMichael Qiu 3518a550baf2SMin Hu (Connor) if (is_proc_primary()) { 35194b27989dSDmitry Kozlyuk flush_port_owned_resources(pi); 35203889a322SHuisong Li #ifdef RTE_NET_BOND 35213889a322SHuisong Li if (port->bond_flag == 1) 35223889a322SHuisong Li num_slaves = rte_eth_bond_slaves_get(pi, 35233889a322SHuisong Li slave_pids, RTE_MAX_ETHPORTS); 35243889a322SHuisong Li #endif 3525ce8d5614SIntel rte_eth_dev_close(pi); 35263889a322SHuisong Li /* 35273889a322SHuisong Li * If this port is bonded device, all slaves under the 35283889a322SHuisong Li * device need to be removed or closed. 35293889a322SHuisong Li */ 35303889a322SHuisong Li if (port->bond_flag == 1 && num_slaves > 0) 35313889a322SHuisong Li clear_bonding_slave_device(slave_pids, 35323889a322SHuisong Li num_slaves); 3533ce8d5614SIntel } 353463b72657SIvan Ilchenko 353563b72657SIvan Ilchenko free_xstats_display_info(pi); 3536a550baf2SMin Hu (Connor) } 3537ce8d5614SIntel 353885c6571cSThomas Monjalon remove_invalid_ports(); 3539ce8d5614SIntel printf("Done\n"); 3540ce8d5614SIntel } 3541ce8d5614SIntel 3542edab33b1STetsuya Mukawa void 354397f1e196SWei Dai reset_port(portid_t pid) 354497f1e196SWei Dai { 354597f1e196SWei Dai int diag; 354697f1e196SWei Dai portid_t pi; 354797f1e196SWei Dai struct rte_port *port; 354897f1e196SWei Dai 354997f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 355097f1e196SWei Dai return; 355197f1e196SWei Dai 35521cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 35531cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 355461a3b0e5SAndrew Rybchenko fprintf(stderr, 355561a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 35561cde1b9aSShougang Wang return; 35571cde1b9aSShougang Wang } 35581cde1b9aSShougang Wang 355997f1e196SWei Dai printf("Resetting ports...\n"); 356097f1e196SWei Dai 356197f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 356297f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 356397f1e196SWei Dai continue; 356497f1e196SWei Dai 356597f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 356661a3b0e5SAndrew Rybchenko fprintf(stderr, 356761a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 356861a3b0e5SAndrew Rybchenko pi); 356997f1e196SWei Dai continue; 357097f1e196SWei Dai } 357197f1e196SWei Dai 357297f1e196SWei Dai if (port_is_bonding_slave(pi)) { 357361a3b0e5SAndrew Rybchenko fprintf(stderr, 357461a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 357597f1e196SWei Dai pi); 357697f1e196SWei Dai continue; 357797f1e196SWei Dai } 357897f1e196SWei Dai 3579e9351eaaSQiming Yang if (is_proc_primary()) { 358097f1e196SWei Dai diag = rte_eth_dev_reset(pi); 358197f1e196SWei Dai if (diag == 0) { 358297f1e196SWei Dai port = &ports[pi]; 358397f1e196SWei Dai port->need_reconfig = 1; 358497f1e196SWei Dai port->need_reconfig_queues = 1; 358597f1e196SWei Dai } else { 358661a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. diag=%d\n", 358761a3b0e5SAndrew Rybchenko pi, diag); 358897f1e196SWei Dai } 358997f1e196SWei Dai } 3590e9351eaaSQiming Yang } 359197f1e196SWei Dai 359297f1e196SWei Dai printf("Done\n"); 359397f1e196SWei Dai } 359497f1e196SWei Dai 359597f1e196SWei Dai void 3596edab33b1STetsuya Mukawa attach_port(char *identifier) 3597ce8d5614SIntel { 35984f1ed78eSThomas Monjalon portid_t pi; 3599c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3600ce8d5614SIntel 3601edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3602edab33b1STetsuya Mukawa 3603edab33b1STetsuya Mukawa if (identifier == NULL) { 360461a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3605edab33b1STetsuya Mukawa return; 3606ce8d5614SIntel } 3607ce8d5614SIntel 360875b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3609c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3610edab33b1STetsuya Mukawa return; 3611c9cce428SThomas Monjalon } 3612c9cce428SThomas Monjalon 36134f1ed78eSThomas Monjalon /* first attach mode: event */ 36144f1ed78eSThomas Monjalon if (setup_on_probe_event) { 36154f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 36164f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 36174f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 36184f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 36194f1ed78eSThomas Monjalon setup_attached_port(pi); 36204f1ed78eSThomas Monjalon return; 36214f1ed78eSThomas Monjalon } 36224f1ed78eSThomas Monjalon 36234f1ed78eSThomas Monjalon /* second attach mode: iterator */ 362486fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 36254f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 362686fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 362786fa5de1SThomas Monjalon continue; /* port was already attached before */ 3628c9cce428SThomas Monjalon setup_attached_port(pi); 3629c9cce428SThomas Monjalon } 363086fa5de1SThomas Monjalon } 3631c9cce428SThomas Monjalon 3632c9cce428SThomas Monjalon static void 3633c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3634c9cce428SThomas Monjalon { 3635c9cce428SThomas Monjalon unsigned int socket_id; 363634fc1051SIvan Ilchenko int ret; 3637edab33b1STetsuya Mukawa 3638931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 363929841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 3640931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 364129841336SPhil Yang socket_id = socket_ids[0]; 3642931126baSBernard Iremonger reconfig(pi, socket_id); 364334fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 364434fc1051SIvan Ilchenko if (ret != 0) 364561a3b0e5SAndrew Rybchenko fprintf(stderr, 364661a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 364734fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3648edab33b1STetsuya Mukawa 36494f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 36504f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 36514f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 36524f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3653edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3654edab33b1STetsuya Mukawa 3655edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 3656edab33b1STetsuya Mukawa printf("Done\n"); 3657edab33b1STetsuya Mukawa } 3658edab33b1STetsuya Mukawa 36590654d4a8SThomas Monjalon static void 36600654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 36615f4ec54fSChen Jing D(Mark) { 3662f8e5baa2SThomas Monjalon portid_t sibling; 3663f8e5baa2SThomas Monjalon 3664f8e5baa2SThomas Monjalon if (dev == NULL) { 366561a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3666f8e5baa2SThomas Monjalon return; 3667f8e5baa2SThomas Monjalon } 3668f8e5baa2SThomas Monjalon 36690654d4a8SThomas Monjalon printf("Removing a device...\n"); 3670938a184aSAdrien Mazarguil 36712a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 36722a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 36732a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 367461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 367561a3b0e5SAndrew Rybchenko sibling); 36762a449871SThomas Monjalon return; 36772a449871SThomas Monjalon } 36784b27989dSDmitry Kozlyuk flush_port_owned_resources(sibling); 36792a449871SThomas Monjalon } 36802a449871SThomas Monjalon } 36812a449871SThomas Monjalon 368275b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3683ec5ecd7eSDavid Marchand TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev)); 3684edab33b1STetsuya Mukawa return; 36853070419eSGaetan Rivet } 36864f1de450SThomas Monjalon remove_invalid_ports(); 368703ce2c53SMatan Azrad 36880654d4a8SThomas Monjalon printf("Device is detached\n"); 3689f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3690edab33b1STetsuya Mukawa printf("Done\n"); 3691edab33b1STetsuya Mukawa return; 36925f4ec54fSChen Jing D(Mark) } 36935f4ec54fSChen Jing D(Mark) 3694af75078fSIntel void 36950654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 36960654d4a8SThomas Monjalon { 36970a0821bcSPaulis Gributs int ret; 36980a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 36990a0821bcSPaulis Gributs 37000654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 37010654d4a8SThomas Monjalon return; 37020654d4a8SThomas Monjalon 37030654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 37040654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 370561a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 37060654d4a8SThomas Monjalon return; 37070654d4a8SThomas Monjalon } 370861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 37090654d4a8SThomas Monjalon } 37100654d4a8SThomas Monjalon 37110a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 37120a0821bcSPaulis Gributs if (ret != 0) { 37130a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 37140a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 37150a0821bcSPaulis Gributs port_id); 37160a0821bcSPaulis Gributs return; 37170a0821bcSPaulis Gributs } 37180a0821bcSPaulis Gributs detach_device(dev_info.device); 37190654d4a8SThomas Monjalon } 37200654d4a8SThomas Monjalon 37210654d4a8SThomas Monjalon void 37225edee5f6SThomas Monjalon detach_devargs(char *identifier) 372355e51c96SNithin Dabilpuram { 372455e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 372555e51c96SNithin Dabilpuram struct rte_devargs da; 372655e51c96SNithin Dabilpuram portid_t port_id; 372755e51c96SNithin Dabilpuram 372855e51c96SNithin Dabilpuram printf("Removing a device...\n"); 372955e51c96SNithin Dabilpuram 373055e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 373155e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 373261a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 373355e51c96SNithin Dabilpuram return; 373455e51c96SNithin Dabilpuram } 373555e51c96SNithin Dabilpuram 373655e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 373755e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 373855e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 373961a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 374061a3b0e5SAndrew Rybchenko port_id); 3741149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 374264051bb1SXueming Li rte_devargs_reset(&da); 374355e51c96SNithin Dabilpuram return; 374455e51c96SNithin Dabilpuram } 37454b27989dSDmitry Kozlyuk flush_port_owned_resources(port_id); 374655e51c96SNithin Dabilpuram } 374755e51c96SNithin Dabilpuram } 374855e51c96SNithin Dabilpuram 3749148c51a3SDavid Marchand if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) { 375055e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 3751148c51a3SDavid Marchand da.name, rte_bus_name(da.bus)); 375264051bb1SXueming Li rte_devargs_reset(&da); 375355e51c96SNithin Dabilpuram return; 375455e51c96SNithin Dabilpuram } 375555e51c96SNithin Dabilpuram 375655e51c96SNithin Dabilpuram remove_invalid_ports(); 375755e51c96SNithin Dabilpuram 375855e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 375955e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 376055e51c96SNithin Dabilpuram printf("Done\n"); 376164051bb1SXueming Li rte_devargs_reset(&da); 376255e51c96SNithin Dabilpuram } 376355e51c96SNithin Dabilpuram 376455e51c96SNithin Dabilpuram void 3765af75078fSIntel pmd_test_exit(void) 3766af75078fSIntel { 3767af75078fSIntel portid_t pt_id; 376826cbb419SViacheslav Ovsiienko unsigned int i; 3769fb73e096SJeff Guo int ret; 3770af75078fSIntel 37718210ec25SPablo de Lara if (test_done == 0) 37728210ec25SPablo de Lara stop_packet_forwarding(); 37738210ec25SPablo de Lara 3774761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 377526cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 37763a0968c8SShahaf Shuler if (mempools[i]) { 37773a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 37783a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 37793a0968c8SShahaf Shuler NULL); 37803a0968c8SShahaf Shuler } 37813a0968c8SShahaf Shuler } 3782761f7ae1SJie Zhou #endif 3783d3a274ceSZhihong Wang if (ports != NULL) { 3784d3a274ceSZhihong Wang no_link_check = 1; 37857d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 378608fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3787af75078fSIntel fflush(stdout); 3788d3a274ceSZhihong Wang stop_port(pt_id); 378908fd782bSCristian Dumitrescu } 379008fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 379108fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 379208fd782bSCristian Dumitrescu fflush(stdout); 3793d3a274ceSZhihong Wang close_port(pt_id); 3794af75078fSIntel } 3795d3a274ceSZhihong Wang } 3796fb73e096SJeff Guo 3797fb73e096SJeff Guo if (hot_plug) { 3798fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 37992049c511SJeff Guo if (ret) { 3800fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3801fb73e096SJeff Guo "fail to stop device event monitor."); 38022049c511SJeff Guo return; 38032049c511SJeff Guo } 3804fb73e096SJeff Guo 38052049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3806cc1bf307SJeff Guo dev_event_callback, NULL); 38072049c511SJeff Guo if (ret < 0) { 3808fb73e096SJeff Guo RTE_LOG(ERR, EAL, 38092049c511SJeff Guo "fail to unregister device event callback.\n"); 38102049c511SJeff Guo return; 38112049c511SJeff Guo } 38122049c511SJeff Guo 38132049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 38142049c511SJeff Guo if (ret) { 38152049c511SJeff Guo RTE_LOG(ERR, EAL, 38162049c511SJeff Guo "fail to disable hotplug handling.\n"); 38172049c511SJeff Guo return; 38182049c511SJeff Guo } 3819fb73e096SJeff Guo } 382026cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3821401b744dSShahaf Shuler if (mempools[i]) 3822a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3823401b744dSShahaf Shuler } 382463b72657SIvan Ilchenko free(xstats_display); 3825fb73e096SJeff Guo 3826d3a274ceSZhihong Wang printf("\nBye...\n"); 3827af75078fSIntel } 3828af75078fSIntel 3829af75078fSIntel typedef void (*cmd_func_t)(void); 3830af75078fSIntel struct pmd_test_command { 3831af75078fSIntel const char *cmd_name; 3832af75078fSIntel cmd_func_t cmd_func; 3833af75078fSIntel }; 3834af75078fSIntel 3835ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3836af75078fSIntel static void 3837edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3838af75078fSIntel { 3839ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3840ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3841f8244c63SZhiyong Yang portid_t portid; 3842f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3843ce8d5614SIntel struct rte_eth_link link; 3844e661a08bSIgor Romanov int ret; 3845ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3846ce8d5614SIntel 3847ce8d5614SIntel printf("Checking link statuses...\n"); 3848ce8d5614SIntel fflush(stdout); 3849ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3850ce8d5614SIntel all_ports_up = 1; 38517d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3852ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3853ce8d5614SIntel continue; 3854ce8d5614SIntel memset(&link, 0, sizeof(link)); 3855e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3856e661a08bSIgor Romanov if (ret < 0) { 3857e661a08bSIgor Romanov all_ports_up = 0; 3858e661a08bSIgor Romanov if (print_flag == 1) 385961a3b0e5SAndrew Rybchenko fprintf(stderr, 386061a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3861e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3862e661a08bSIgor Romanov continue; 3863e661a08bSIgor Romanov } 3864ce8d5614SIntel /* print link status if flag set */ 3865ce8d5614SIntel if (print_flag == 1) { 3866ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3867ba5509a6SIvan Dyukov sizeof(link_status), &link); 3868ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3869ce8d5614SIntel continue; 3870ce8d5614SIntel } 3871ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3872295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3873ce8d5614SIntel all_ports_up = 0; 3874ce8d5614SIntel break; 3875ce8d5614SIntel } 3876ce8d5614SIntel } 3877ce8d5614SIntel /* after finally printing all link status, get out */ 3878ce8d5614SIntel if (print_flag == 1) 3879ce8d5614SIntel break; 3880ce8d5614SIntel 3881ce8d5614SIntel if (all_ports_up == 0) { 3882ce8d5614SIntel fflush(stdout); 3883ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 3884ce8d5614SIntel } 3885ce8d5614SIntel 3886ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3887ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3888ce8d5614SIntel print_flag = 1; 3889ce8d5614SIntel } 38908ea656f8SGaetan Rivet 38918ea656f8SGaetan Rivet if (lsc_interrupt) 38928ea656f8SGaetan Rivet break; 3893ce8d5614SIntel } 3894af75078fSIntel } 3895af75078fSIntel 3896284c908cSGaetan Rivet static void 3897cc1bf307SJeff Guo rmv_port_callback(void *arg) 3898284c908cSGaetan Rivet { 38993b97888aSMatan Azrad int need_to_start = 0; 39000da2a62bSMatan Azrad int org_no_link_check = no_link_check; 390128caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 39020a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 39030a0821bcSPaulis Gributs int ret; 3904284c908cSGaetan Rivet 3905284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3906284c908cSGaetan Rivet 39073b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 39083b97888aSMatan Azrad need_to_start = 1; 39093b97888aSMatan Azrad stop_packet_forwarding(); 39103b97888aSMatan Azrad } 39110da2a62bSMatan Azrad no_link_check = 1; 3912284c908cSGaetan Rivet stop_port(port_id); 39130da2a62bSMatan Azrad no_link_check = org_no_link_check; 39140654d4a8SThomas Monjalon 39150a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 39160a0821bcSPaulis Gributs if (ret != 0) 39170a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 39180a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 39190a0821bcSPaulis Gributs port_id); 3920e1d38504SPaulis Gributs else { 3921e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3922e1d38504SPaulis Gributs close_port(port_id); 3923e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3924e1d38504SPaulis Gributs } 39253b97888aSMatan Azrad if (need_to_start) 39263b97888aSMatan Azrad start_packet_forwarding(0); 3927284c908cSGaetan Rivet } 3928284c908cSGaetan Rivet 392976ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3930d6af1a13SBernard Iremonger static int 3931f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3932d6af1a13SBernard Iremonger void *ret_param) 393376ad4a2dSGaetan Rivet { 393476ad4a2dSGaetan Rivet RTE_SET_USED(param); 3935d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 393676ad4a2dSGaetan Rivet 393776ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 393861a3b0e5SAndrew Rybchenko fprintf(stderr, 393961a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 394076ad4a2dSGaetan Rivet port_id, __func__, type); 394176ad4a2dSGaetan Rivet fflush(stderr); 39423af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3943f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 394497b5d8b5SThomas Monjalon eth_event_desc[type]); 394576ad4a2dSGaetan Rivet fflush(stdout); 394676ad4a2dSGaetan Rivet } 3947284c908cSGaetan Rivet 3948284c908cSGaetan Rivet switch (type) { 39494f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 39504f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 39514f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 39524f1ed78eSThomas Monjalon break; 3953284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 39544f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 39554f1ed78eSThomas Monjalon break; 3956284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3957cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 395861a3b0e5SAndrew Rybchenko fprintf(stderr, 395961a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3960284c908cSGaetan Rivet break; 396185c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 396285c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 396385c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 396485c6571cSThomas Monjalon break; 3965bc70e559SSpike Du case RTE_ETH_EVENT_RX_AVAIL_THRESH: { 3966bc70e559SSpike Du uint16_t rxq_id; 3967bc70e559SSpike Du int ret; 3968bc70e559SSpike Du 3969bc70e559SSpike Du /* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */ 3970bc70e559SSpike Du for (rxq_id = 0; ; rxq_id++) { 3971bc70e559SSpike Du ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id, 3972bc70e559SSpike Du NULL); 3973bc70e559SSpike Du if (ret <= 0) 3974bc70e559SSpike Du break; 3975bc70e559SSpike Du printf("Received avail_thresh event, port: %u, rxq_id: %u\n", 3976bc70e559SSpike Du port_id, rxq_id); 3977f41a5092SSpike Du 3978f41a5092SSpike Du #ifdef RTE_NET_MLX5 3979f41a5092SSpike Du mlx5_test_avail_thresh_event_handler(port_id, rxq_id); 3980f41a5092SSpike Du #endif 3981bc70e559SSpike Du } 3982bc70e559SSpike Du break; 3983bc70e559SSpike Du } 3984284c908cSGaetan Rivet default: 3985284c908cSGaetan Rivet break; 3986284c908cSGaetan Rivet } 3987d6af1a13SBernard Iremonger return 0; 398876ad4a2dSGaetan Rivet } 398976ad4a2dSGaetan Rivet 399097b5d8b5SThomas Monjalon static int 399197b5d8b5SThomas Monjalon register_eth_event_callback(void) 399297b5d8b5SThomas Monjalon { 399397b5d8b5SThomas Monjalon int ret; 399497b5d8b5SThomas Monjalon enum rte_eth_event_type event; 399597b5d8b5SThomas Monjalon 399697b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 399797b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 399897b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 399997b5d8b5SThomas Monjalon event, 400097b5d8b5SThomas Monjalon eth_event_callback, 400197b5d8b5SThomas Monjalon NULL); 400297b5d8b5SThomas Monjalon if (ret != 0) { 400397b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 400497b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 400597b5d8b5SThomas Monjalon return -1; 400697b5d8b5SThomas Monjalon } 400797b5d8b5SThomas Monjalon } 400897b5d8b5SThomas Monjalon 400997b5d8b5SThomas Monjalon return 0; 401097b5d8b5SThomas Monjalon } 401197b5d8b5SThomas Monjalon 4012fb73e096SJeff Guo /* This function is used by the interrupt thread */ 4013fb73e096SJeff Guo static void 4014cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 4015fb73e096SJeff Guo __rte_unused void *arg) 4016fb73e096SJeff Guo { 40172049c511SJeff Guo uint16_t port_id; 40182049c511SJeff Guo int ret; 40192049c511SJeff Guo 4020fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 4021fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 4022fb73e096SJeff Guo __func__, type); 4023fb73e096SJeff Guo fflush(stderr); 4024fb73e096SJeff Guo } 4025fb73e096SJeff Guo 4026fb73e096SJeff Guo switch (type) { 4027fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 4028cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 4029fb73e096SJeff Guo device_name); 40302049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 40312049c511SJeff Guo if (ret) { 40322049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 40332049c511SJeff Guo device_name); 40342049c511SJeff Guo return; 40352049c511SJeff Guo } 4036cc1bf307SJeff Guo /* 4037cc1bf307SJeff Guo * Because the user's callback is invoked in eal interrupt 4038cc1bf307SJeff Guo * callback, the interrupt callback need to be finished before 4039cc1bf307SJeff Guo * it can be unregistered when detaching device. So finish 4040cc1bf307SJeff Guo * callback soon and use a deferred removal to detach device 4041cc1bf307SJeff Guo * is need. It is a workaround, once the device detaching be 4042cc1bf307SJeff Guo * moved into the eal in the future, the deferred removal could 4043cc1bf307SJeff Guo * be deleted. 4044cc1bf307SJeff Guo */ 4045cc1bf307SJeff Guo if (rte_eal_alarm_set(100000, 4046cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 4047cc1bf307SJeff Guo RTE_LOG(ERR, EAL, 4048cc1bf307SJeff Guo "Could not set up deferred device removal\n"); 4049fb73e096SJeff Guo break; 4050fb73e096SJeff Guo case RTE_DEV_EVENT_ADD: 4051fb73e096SJeff Guo RTE_LOG(ERR, EAL, "The device: %s has been added!\n", 4052fb73e096SJeff Guo device_name); 4053fb73e096SJeff Guo /* TODO: After finish kernel driver binding, 4054fb73e096SJeff Guo * begin to attach port. 4055fb73e096SJeff Guo */ 4056fb73e096SJeff Guo break; 4057fb73e096SJeff Guo default: 4058fb73e096SJeff Guo break; 4059fb73e096SJeff Guo } 4060fb73e096SJeff Guo } 4061fb73e096SJeff Guo 4062f2c5125aSPablo de Lara static void 4063f4d178c1SXueming Li rxtx_port_config(portid_t pid) 4064f2c5125aSPablo de Lara { 4065d44f8a48SQi Zhang uint16_t qid; 40665e91aeefSWei Zhao uint64_t offloads; 4067f4d178c1SXueming Li struct rte_port *port = &ports[pid]; 4068f2c5125aSPablo de Lara 4069d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 40703c4426dbSDmitry Kozlyuk offloads = port->rxq[qid].conf.offloads; 40713c4426dbSDmitry Kozlyuk port->rxq[qid].conf = port->dev_info.default_rxconf; 4072f4d178c1SXueming Li 4073f4d178c1SXueming Li if (rxq_share > 0 && 4074f4d178c1SXueming Li (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) { 4075f4d178c1SXueming Li /* Non-zero share group to enable RxQ share. */ 40763c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_group = pid / rxq_share + 1; 40773c4426dbSDmitry Kozlyuk port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */ 4078f4d178c1SXueming Li } 4079f4d178c1SXueming Li 4080575e0fd1SWei Zhao if (offloads != 0) 40813c4426dbSDmitry Kozlyuk port->rxq[qid].conf.offloads = offloads; 4082d44f8a48SQi Zhang 4083d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 4084f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 40853c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh; 4086f2c5125aSPablo de Lara 4087f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 40883c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh; 4089f2c5125aSPablo de Lara 4090f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 40913c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh; 4092f2c5125aSPablo de Lara 4093f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 40943c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_free_thresh = rx_free_thresh; 4095f2c5125aSPablo de Lara 4096f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 40973c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_drop_en = rx_drop_en; 4098f2c5125aSPablo de Lara 4099d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 4100d44f8a48SQi Zhang } 4101d44f8a48SQi Zhang 4102d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 41033c4426dbSDmitry Kozlyuk offloads = port->txq[qid].conf.offloads; 41043c4426dbSDmitry Kozlyuk port->txq[qid].conf = port->dev_info.default_txconf; 4105575e0fd1SWei Zhao if (offloads != 0) 41063c4426dbSDmitry Kozlyuk port->txq[qid].conf.offloads = offloads; 4107d44f8a48SQi Zhang 4108d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 4109f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 41103c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh; 4111f2c5125aSPablo de Lara 4112f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 41133c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh; 4114f2c5125aSPablo de Lara 4115f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 41163c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh; 4117f2c5125aSPablo de Lara 4118f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 41193c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh; 4120f2c5125aSPablo de Lara 4121f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 41223c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_free_thresh = tx_free_thresh; 4123d44f8a48SQi Zhang 4124d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 4125d44f8a48SQi Zhang } 4126f2c5125aSPablo de Lara } 4127f2c5125aSPablo de Lara 41280c4abd36SSteve Yang /* 4129b563c142SFerruh Yigit * Helper function to set MTU from frame size 41300c4abd36SSteve Yang * 41310c4abd36SSteve Yang * port->dev_info should be set before calling this function. 41320c4abd36SSteve Yang * 41330c4abd36SSteve Yang * return 0 on success, negative on error 41340c4abd36SSteve Yang */ 41350c4abd36SSteve Yang int 4136b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen) 41370c4abd36SSteve Yang { 41380c4abd36SSteve Yang struct rte_port *port = &ports[portid]; 41390c4abd36SSteve Yang uint32_t eth_overhead; 41401bb4a528SFerruh Yigit uint16_t mtu, new_mtu; 41410c4abd36SSteve Yang 41421bb4a528SFerruh Yigit eth_overhead = get_eth_overhead(&port->dev_info); 41431bb4a528SFerruh Yigit 41441bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(portid, &mtu) != 0) { 41451bb4a528SFerruh Yigit printf("Failed to get MTU for port %u\n", portid); 41461bb4a528SFerruh Yigit return -1; 41471bb4a528SFerruh Yigit } 41481bb4a528SFerruh Yigit 41491bb4a528SFerruh Yigit new_mtu = max_rx_pktlen - eth_overhead; 41500c4abd36SSteve Yang 41511bb4a528SFerruh Yigit if (mtu == new_mtu) 41521bb4a528SFerruh Yigit return 0; 41531bb4a528SFerruh Yigit 41541bb4a528SFerruh Yigit if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) { 415561a3b0e5SAndrew Rybchenko fprintf(stderr, 415661a3b0e5SAndrew Rybchenko "Failed to set MTU to %u for port %u\n", 41571bb4a528SFerruh Yigit new_mtu, portid); 41581bb4a528SFerruh Yigit return -1; 41590c4abd36SSteve Yang } 41600c4abd36SSteve Yang 41611bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = new_mtu; 41621bb4a528SFerruh Yigit 41630c4abd36SSteve Yang return 0; 41640c4abd36SSteve Yang } 41650c4abd36SSteve Yang 4166013af9b6SIntel void 4167013af9b6SIntel init_port_config(void) 4168013af9b6SIntel { 4169013af9b6SIntel portid_t pid; 4170013af9b6SIntel struct rte_port *port; 4171655eae01SJie Wang int ret, i; 4172013af9b6SIntel 41737d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 4174013af9b6SIntel port = &ports[pid]; 41756f51deb9SIvan Ilchenko 41766f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 41776f51deb9SIvan Ilchenko if (ret != 0) 41786f51deb9SIvan Ilchenko return; 41796f51deb9SIvan Ilchenko 41803ce690d3SBruce Richardson if (nb_rxq > 1) { 4181013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 418290892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 4183422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 4184af75078fSIntel } else { 4185013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 4186013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 4187af75078fSIntel } 41883ce690d3SBruce Richardson 41895f592039SJingjing Wu if (port->dcb_flag == 0) { 4190655eae01SJie Wang if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) { 4191f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 4192f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4193295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_RSS); 4194655eae01SJie Wang } else { 4195295968d1SFerruh Yigit port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; 4196655eae01SJie Wang port->dev_conf.rxmode.offloads &= 4197295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4198655eae01SJie Wang 4199655eae01SJie Wang for (i = 0; 4200655eae01SJie Wang i < port->dev_info.nb_rx_queues; 4201655eae01SJie Wang i++) 42023c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads &= 4203295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4204655eae01SJie Wang } 42053ce690d3SBruce Richardson } 42063ce690d3SBruce Richardson 4207f4d178c1SXueming Li rxtx_port_config(pid); 4208013af9b6SIntel 4209a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 4210a5279d25SIgor Romanov if (ret != 0) 4211a5279d25SIgor Romanov return; 4212013af9b6SIntel 42130a0821bcSPaulis Gributs if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC)) 42148ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 42150a0821bcSPaulis Gributs if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV)) 4216284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 4217013af9b6SIntel } 4218013af9b6SIntel } 4219013af9b6SIntel 422041b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 422141b05095SBernard Iremonger { 422241b05095SBernard Iremonger struct rte_port *port; 422341b05095SBernard Iremonger 422441b05095SBernard Iremonger port = &ports[slave_pid]; 422541b05095SBernard Iremonger port->slave_flag = 1; 422641b05095SBernard Iremonger } 422741b05095SBernard Iremonger 422841b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 422941b05095SBernard Iremonger { 423041b05095SBernard Iremonger struct rte_port *port; 423141b05095SBernard Iremonger 423241b05095SBernard Iremonger port = &ports[slave_pid]; 423341b05095SBernard Iremonger port->slave_flag = 0; 423441b05095SBernard Iremonger } 423541b05095SBernard Iremonger 42360e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 42370e545d30SBernard Iremonger { 42380e545d30SBernard Iremonger struct rte_port *port; 42390a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 42400a0821bcSPaulis Gributs int ret; 42410e545d30SBernard Iremonger 42420e545d30SBernard Iremonger port = &ports[slave_pid]; 42430a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(slave_pid, &dev_info); 42440a0821bcSPaulis Gributs if (ret != 0) { 42450a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 42460a0821bcSPaulis Gributs "Failed to get device info for port id %d," 42470a0821bcSPaulis Gributs "cannot determine if the port is a bonded slave", 42480a0821bcSPaulis Gributs slave_pid); 42490a0821bcSPaulis Gributs return 0; 42500a0821bcSPaulis Gributs } 42510a0821bcSPaulis Gributs if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 4252b8b8b344SMatan Azrad return 1; 4253b8b8b344SMatan Azrad return 0; 42540e545d30SBernard Iremonger } 42550e545d30SBernard Iremonger 4256013af9b6SIntel const uint16_t vlan_tags[] = { 4257013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 4258013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 4259013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 4260013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 4261013af9b6SIntel }; 4262013af9b6SIntel 4263013af9b6SIntel static int 4264ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 42651a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 42661a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 42671a572499SJingjing Wu uint8_t pfc_en) 4268013af9b6SIntel { 4269013af9b6SIntel uint8_t i; 4270ac7c491cSKonstantin Ananyev int32_t rc; 4271ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 4272af75078fSIntel 4273af75078fSIntel /* 4274013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 4275013af9b6SIntel * given above, and the number of traffic classes available for use. 4276af75078fSIntel */ 42771a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 42781a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 42791a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 42801a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 42811a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 4282013af9b6SIntel 4283547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 42841a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 42851a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 42861a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 4287295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 42881a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 4289295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 4290013af9b6SIntel 42911a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 42921a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 42931a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 42941a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 42951a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 4296af75078fSIntel } 4297295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4298f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 4299f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 4300013af9b6SIntel } 4301013af9b6SIntel 4302013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 4303f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4304f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4305295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB); 4306295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 43071a572499SJingjing Wu } else { 43081a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 43091a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 43101a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 43111a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 4312013af9b6SIntel 43135139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 43145139bc12STing Xu 4315ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 4316ac7c491cSKonstantin Ananyev if (rc != 0) 4317ac7c491cSKonstantin Ananyev return rc; 4318ac7c491cSKonstantin Ananyev 43191a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 43201a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 43211a572499SJingjing Wu 4322295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4323bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 4324bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 4325013af9b6SIntel } 4326ac7c491cSKonstantin Ananyev 4327f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4328f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4329295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS); 4330ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 4331295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB; 43321a572499SJingjing Wu } 43331a572499SJingjing Wu 43341a572499SJingjing Wu if (pfc_en) 43351a572499SJingjing Wu eth_conf->dcb_capability_en = 4336295968d1SFerruh Yigit RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT; 4337013af9b6SIntel else 4338295968d1SFerruh Yigit eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT; 4339013af9b6SIntel 4340013af9b6SIntel return 0; 4341013af9b6SIntel } 4342013af9b6SIntel 4343013af9b6SIntel int 43441a572499SJingjing Wu init_port_dcb_config(portid_t pid, 43451a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 43461a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 43471a572499SJingjing Wu uint8_t pfc_en) 4348013af9b6SIntel { 4349013af9b6SIntel struct rte_eth_conf port_conf; 4350013af9b6SIntel struct rte_port *rte_port; 4351013af9b6SIntel int retval; 4352013af9b6SIntel uint16_t i; 4353013af9b6SIntel 4354a550baf2SMin Hu (Connor) if (num_procs > 1) { 4355a550baf2SMin Hu (Connor) printf("The multi-process feature doesn't support dcb.\n"); 4356a550baf2SMin Hu (Connor) return -ENOTSUP; 4357a550baf2SMin Hu (Connor) } 43582a977b89SWenzhuo Lu rte_port = &ports[pid]; 4359013af9b6SIntel 4360c1ba6c32SHuisong Li /* retain the original device configuration. */ 4361c1ba6c32SHuisong Li memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf)); 4362d5354e89SYanglong Wu 4363013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 4364ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 4365013af9b6SIntel if (retval < 0) 4366013af9b6SIntel return retval; 4367295968d1SFerruh Yigit port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4368cbe70fdeSJie Wang /* remove RSS HASH offload for DCB in vt mode */ 4369cbe70fdeSJie Wang if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 4370cbe70fdeSJie Wang port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4371cbe70fdeSJie Wang for (i = 0; i < nb_rxq; i++) 43723c4426dbSDmitry Kozlyuk rte_port->rxq[i].conf.offloads &= 4373cbe70fdeSJie Wang ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 4374cbe70fdeSJie Wang } 4375013af9b6SIntel 43762f203d44SQi Zhang /* re-configure the device . */ 43772b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 43782b0e0ebaSChenbo Xia if (retval < 0) 43792b0e0ebaSChenbo Xia return retval; 43806f51deb9SIvan Ilchenko 43816f51deb9SIvan Ilchenko retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); 43826f51deb9SIvan Ilchenko if (retval != 0) 43836f51deb9SIvan Ilchenko return retval; 43842a977b89SWenzhuo Lu 43852a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 43862a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 43872a977b89SWenzhuo Lu */ 43882a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 43892a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 439061a3b0e5SAndrew Rybchenko fprintf(stderr, 439161a3b0e5SAndrew Rybchenko "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n", 439261a3b0e5SAndrew Rybchenko pid); 43932a977b89SWenzhuo Lu return -1; 43942a977b89SWenzhuo Lu } 43952a977b89SWenzhuo Lu 43962a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 43972a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 43982a977b89SWenzhuo Lu */ 43992a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 440086ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 440186ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 440286ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 440386ef65eeSBernard Iremonger } else { 44042a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 44052a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 440686ef65eeSBernard Iremonger } 44072a977b89SWenzhuo Lu } else { 44082a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 44092a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 44102a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 44112a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 44122a977b89SWenzhuo Lu } else { 44132a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 44142a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 44152a977b89SWenzhuo Lu 44162a977b89SWenzhuo Lu } 44172a977b89SWenzhuo Lu } 44182a977b89SWenzhuo Lu rx_free_thresh = 64; 44192a977b89SWenzhuo Lu 4420013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 4421013af9b6SIntel 4422f4d178c1SXueming Li rxtx_port_config(pid); 4423013af9b6SIntel /* VLAN filter */ 4424295968d1SFerruh Yigit rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 44251a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 4426013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 4427013af9b6SIntel 4428a5279d25SIgor Romanov retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); 4429a5279d25SIgor Romanov if (retval != 0) 4430a5279d25SIgor Romanov return retval; 4431a5279d25SIgor Romanov 44327741e4cfSIntel rte_port->dcb_flag = 1; 44337741e4cfSIntel 4434a690a070SHuisong Li /* Enter DCB configuration status */ 4435a690a070SHuisong Li dcb_config = 1; 4436a690a070SHuisong Li 4437013af9b6SIntel return 0; 4438af75078fSIntel } 4439af75078fSIntel 4440ffc468ffSTetsuya Mukawa static void 4441ffc468ffSTetsuya Mukawa init_port(void) 4442ffc468ffSTetsuya Mukawa { 44431b9f2746SGregory Etelson int i; 44441b9f2746SGregory Etelson 4445ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. */ 4446ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 4447ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 4448ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 4449ffc468ffSTetsuya Mukawa if (ports == NULL) { 4450ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 4451ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 4452ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 4453ffc468ffSTetsuya Mukawa } 4454236bc417SGregory Etelson for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 4455236bc417SGregory Etelson ports[i].fwd_mac_swap = 1; 445663b72657SIvan Ilchenko ports[i].xstats_info.allocated = false; 44571b9f2746SGregory Etelson LIST_INIT(&ports[i].flow_tunnel_list); 4458236bc417SGregory Etelson } 445929841336SPhil Yang /* Initialize ports NUMA structures */ 446029841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 446129841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 446229841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 4463ffc468ffSTetsuya Mukawa } 4464ffc468ffSTetsuya Mukawa 4465d3a274ceSZhihong Wang static void 4466cfea1f30SPablo de Lara print_stats(void) 4467cfea1f30SPablo de Lara { 4468cfea1f30SPablo de Lara uint8_t i; 4469cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 4470cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 4471cfea1f30SPablo de Lara 4472cfea1f30SPablo de Lara /* Clear screen and move to top left */ 4473cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 4474cfea1f30SPablo de Lara 4475cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 4476cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 4477cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 4478683d1e82SIgor Romanov 4479683d1e82SIgor Romanov fflush(stdout); 4480cfea1f30SPablo de Lara } 4481cfea1f30SPablo de Lara 4482cfea1f30SPablo de Lara static void 44830fd1386cSStephen Hemminger signal_handler(int signum __rte_unused) 4484d3a274ceSZhihong Wang { 4485d9a191a0SPhil Yang f_quit = 1; 4486f1d0993eSStephen Hemminger prompt_exit(); 4487d3a274ceSZhihong Wang } 4488d3a274ceSZhihong Wang 4489af75078fSIntel int 4490af75078fSIntel main(int argc, char** argv) 4491af75078fSIntel { 4492af75078fSIntel int diag; 4493f8244c63SZhiyong Yang portid_t port_id; 44944918a357SXiaoyun Li uint16_t count; 4495fb73e096SJeff Guo int ret; 4496af75078fSIntel 4497f1d0993eSStephen Hemminger #ifdef RTE_EXEC_ENV_WINDOWS 4498d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 4499d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 4500f1d0993eSStephen Hemminger #else 4501f1d0993eSStephen Hemminger /* Want read() not to be restarted on signal */ 4502f1d0993eSStephen Hemminger struct sigaction action = { 4503f1d0993eSStephen Hemminger .sa_handler = signal_handler, 4504f1d0993eSStephen Hemminger }; 4505f1d0993eSStephen Hemminger 4506f1d0993eSStephen Hemminger sigaction(SIGINT, &action, NULL); 4507f1d0993eSStephen Hemminger sigaction(SIGTERM, &action, NULL); 4508f1d0993eSStephen Hemminger #endif 4509d3a274ceSZhihong Wang 4510285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 4511285fd101SOlivier Matz if (testpmd_logtype < 0) 451216267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register log type"); 4513285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 4514285fd101SOlivier Matz 45159201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 45169201806eSStephen Hemminger if (diag < 0) 451716267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", 451816267ceeSStephen Hemminger rte_strerror(rte_errno)); 45199201806eSStephen Hemminger 4520563fbd08SDavid Marchand /* allocate port structures, and init them */ 4521563fbd08SDavid Marchand init_port(); 4522563fbd08SDavid Marchand 452397b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 452497b5d8b5SThomas Monjalon if (ret != 0) 452516267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); 452697b5d8b5SThomas Monjalon 4527a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 45284aa0d012SAnatoly Burakov /* initialize packet capture framework */ 4529e9436f54STiwei Bie rte_pdump_init(); 45304aa0d012SAnatoly Burakov #endif 45314aa0d012SAnatoly Burakov 45324918a357SXiaoyun Li count = 0; 45334918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 45344918a357SXiaoyun Li ports_ids[count] = port_id; 45354918a357SXiaoyun Li count++; 45364918a357SXiaoyun Li } 45374918a357SXiaoyun Li nb_ports = (portid_t) count; 45384aa0d012SAnatoly Burakov if (nb_ports == 0) 45394aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 45404aa0d012SAnatoly Burakov 45414aa0d012SAnatoly Burakov set_def_fwd_config(); 45424aa0d012SAnatoly Burakov if (nb_lcores == 0) 454316267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" 454416267ceeSStephen Hemminger "Check the core mask argument\n"); 45454aa0d012SAnatoly Burakov 4546e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 4547a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4548e505d84cSAnatoly Burakov bitrate_enabled = 0; 4549e505d84cSAnatoly Burakov #endif 4550a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 4551e505d84cSAnatoly Burakov latencystats_enabled = 0; 4552e505d84cSAnatoly Burakov #endif 4553e505d84cSAnatoly Burakov 4554fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 45555fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 4556fb7b8b32SAnatoly Burakov do_mlockall = 0; 4557fb7b8b32SAnatoly Burakov #else 4558fb7b8b32SAnatoly Burakov do_mlockall = 1; 4559fb7b8b32SAnatoly Burakov #endif 4560fb7b8b32SAnatoly Burakov 4561e505d84cSAnatoly Burakov argc -= diag; 4562e505d84cSAnatoly Burakov argv += diag; 4563e505d84cSAnatoly Burakov if (argc > 1) 4564e505d84cSAnatoly Burakov launch_args_parse(argc, argv); 4565e505d84cSAnatoly Burakov 4566761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4567e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 4568285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 45691c036b16SEelco Chaudron strerror(errno)); 45701c036b16SEelco Chaudron } 4571761f7ae1SJie Zhou #endif 45721c036b16SEelco Chaudron 457399cabef0SPablo de Lara if (tx_first && interactive) 457499cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 457599cabef0SPablo de Lara "interactive mode.\n"); 45768820cba4SDavid Hunt 45778820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 457861a3b0e5SAndrew Rybchenko fprintf(stderr, 457961a3b0e5SAndrew Rybchenko "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n"); 45808820cba4SDavid Hunt lsc_interrupt = 0; 45818820cba4SDavid Hunt } 45828820cba4SDavid Hunt 45835a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 458461a3b0e5SAndrew Rybchenko fprintf(stderr, 458561a3b0e5SAndrew Rybchenko "Warning: Either rx or tx queues should be non-zero\n"); 45865a8fb55cSReshma Pattan 45875a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 458861a3b0e5SAndrew Rybchenko fprintf(stderr, 458961a3b0e5SAndrew Rybchenko "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n", 4590af75078fSIntel nb_rxq, nb_txq); 4591af75078fSIntel 4592af75078fSIntel init_config(); 4593fb73e096SJeff Guo 4594fb73e096SJeff Guo if (hot_plug) { 45952049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 4596fb73e096SJeff Guo if (ret) { 45972049c511SJeff Guo RTE_LOG(ERR, EAL, 45982049c511SJeff Guo "fail to enable hotplug handling."); 4599fb73e096SJeff Guo return -1; 4600fb73e096SJeff Guo } 4601fb73e096SJeff Guo 46022049c511SJeff Guo ret = rte_dev_event_monitor_start(); 46032049c511SJeff Guo if (ret) { 46042049c511SJeff Guo RTE_LOG(ERR, EAL, 46052049c511SJeff Guo "fail to start device event monitoring."); 46062049c511SJeff Guo return -1; 46072049c511SJeff Guo } 46082049c511SJeff Guo 46092049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 4610cc1bf307SJeff Guo dev_event_callback, NULL); 46112049c511SJeff Guo if (ret) { 46122049c511SJeff Guo RTE_LOG(ERR, EAL, 46132049c511SJeff Guo "fail to register device event callback\n"); 46142049c511SJeff Guo return -1; 46152049c511SJeff Guo } 4616fb73e096SJeff Guo } 4617fb73e096SJeff Guo 46187e403725SGregory Etelson if (!no_device_start && start_port(RTE_PORT_ALL) != 0) { 46197e403725SGregory Etelson if (!interactive) { 46207e403725SGregory Etelson rte_eal_cleanup(); 4621148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 46227e403725SGregory Etelson } 46237e403725SGregory Etelson fprintf(stderr, "Start ports failed\n"); 46247e403725SGregory Etelson } 4625af75078fSIntel 4626ce8d5614SIntel /* set all ports to promiscuous mode by default */ 462734fc1051SIvan Ilchenko RTE_ETH_FOREACH_DEV(port_id) { 462834fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(port_id); 462934fc1051SIvan Ilchenko if (ret != 0) 463061a3b0e5SAndrew Rybchenko fprintf(stderr, 463161a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 463234fc1051SIvan Ilchenko port_id, rte_strerror(-ret)); 463334fc1051SIvan Ilchenko } 4634af75078fSIntel 4635bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS 46367e4441c8SRemy Horton /* Init metrics library */ 46377e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 4638bb9be9a4SDavid Marchand #endif 46397e4441c8SRemy Horton 4640a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 464162d3216dSReshma Pattan if (latencystats_enabled != 0) { 464262d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 464362d3216dSReshma Pattan if (ret) 464461a3b0e5SAndrew Rybchenko fprintf(stderr, 464561a3b0e5SAndrew Rybchenko "Warning: latencystats init() returned error %d\n", 464661a3b0e5SAndrew Rybchenko ret); 464761a3b0e5SAndrew Rybchenko fprintf(stderr, "Latencystats running on lcore %d\n", 464862d3216dSReshma Pattan latencystats_lcore_id); 464962d3216dSReshma Pattan } 465062d3216dSReshma Pattan #endif 465162d3216dSReshma Pattan 46527e4441c8SRemy Horton /* Setup bitrate stats */ 4653a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4654e25e6c70SRemy Horton if (bitrate_enabled != 0) { 46557e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 46567e4441c8SRemy Horton if (bitrate_data == NULL) 4657e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 4658e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 46597e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 4660e25e6c70SRemy Horton } 46617e4441c8SRemy Horton #endif 466299a4974aSRobin Jarry 466399a4974aSRobin Jarry if (record_core_cycles) 466499a4974aSRobin Jarry rte_lcore_register_usage_cb(lcore_usage_callback); 466599a4974aSRobin Jarry 4666a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE 4667592ab76fSDavid Marchand if (init_cmdline() != 0) 4668592ab76fSDavid Marchand rte_exit(EXIT_FAILURE, 4669592ab76fSDavid Marchand "Could not initialise cmdline context.\n"); 4670592ab76fSDavid Marchand 467181ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 467281ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 467381ef862bSAllain Legacy 4674ca7feb22SCyril Chemparathy if (interactive == 1) { 4675ca7feb22SCyril Chemparathy if (auto_start) { 4676ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 4677ca7feb22SCyril Chemparathy start_packet_forwarding(0); 4678ca7feb22SCyril Chemparathy } 4679af75078fSIntel prompt(); 4680ca7feb22SCyril Chemparathy } else 46810d56cb81SThomas Monjalon #endif 46820d56cb81SThomas Monjalon { 4683af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 468499cabef0SPablo de Lara start_packet_forwarding(tx_first); 4685cfea1f30SPablo de Lara if (stats_period != 0) { 4686cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 4687cfea1f30SPablo de Lara uint64_t timer_period; 4688cfea1f30SPablo de Lara 4689cfea1f30SPablo de Lara /* Convert to number of cycles */ 4690cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 4691cfea1f30SPablo de Lara 4692d9a191a0SPhil Yang while (f_quit == 0) { 4693cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 4694cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 4695cfea1f30SPablo de Lara 4696cfea1f30SPablo de Lara if (diff_time >= timer_period) { 4697cfea1f30SPablo de Lara print_stats(); 4698cfea1f30SPablo de Lara /* Reset the timer */ 4699cfea1f30SPablo de Lara diff_time = 0; 4700cfea1f30SPablo de Lara } 4701cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 4702cfea1f30SPablo de Lara prev_time = cur_time; 4703761f7ae1SJie Zhou rte_delay_us_sleep(US_PER_S); 4704cfea1f30SPablo de Lara } 47050fd1386cSStephen Hemminger } else { 47060fd1386cSStephen Hemminger char c; 47070fd1386cSStephen Hemminger fd_set fds; 4708cfea1f30SPablo de Lara 4709af75078fSIntel printf("Press enter to exit\n"); 47100fd1386cSStephen Hemminger 47110fd1386cSStephen Hemminger FD_ZERO(&fds); 47120fd1386cSStephen Hemminger FD_SET(0, &fds); 47130fd1386cSStephen Hemminger 47140fd1386cSStephen Hemminger /* wait for signal or enter */ 47150fd1386cSStephen Hemminger ret = select(1, &fds, NULL, NULL, NULL); 47160fd1386cSStephen Hemminger if (ret < 0 && errno != EINTR) 47170fd1386cSStephen Hemminger rte_exit(EXIT_FAILURE, 47180fd1386cSStephen Hemminger "Select failed: %s\n", 47190fd1386cSStephen Hemminger strerror(errno)); 47200fd1386cSStephen Hemminger 47210fd1386cSStephen Hemminger /* if got enter then consume it */ 47220fd1386cSStephen Hemminger if (ret == 1 && read(0, &c, 1) < 0) 47230fd1386cSStephen Hemminger rte_exit(EXIT_FAILURE, 47240fd1386cSStephen Hemminger "Read failed: %s\n", 47250fd1386cSStephen Hemminger strerror(errno)); 4726af75078fSIntel } 47270fd1386cSStephen Hemminger } 47280fd1386cSStephen Hemminger 47290fd1386cSStephen Hemminger pmd_test_exit(); 47300fd1386cSStephen Hemminger 47310fd1386cSStephen Hemminger #ifdef RTE_LIB_PDUMP 47320fd1386cSStephen Hemminger /* uninitialize packet capture framework */ 47330fd1386cSStephen Hemminger rte_pdump_uninit(); 47340fd1386cSStephen Hemminger #endif 47350fd1386cSStephen Hemminger #ifdef RTE_LIB_LATENCYSTATS 47360fd1386cSStephen Hemminger if (latencystats_enabled != 0) 47370fd1386cSStephen Hemminger rte_latencystats_uninit(); 47380fd1386cSStephen Hemminger #endif 4739af75078fSIntel 47405e516c89SStephen Hemminger ret = rte_eal_cleanup(); 47415e516c89SStephen Hemminger if (ret != 0) 47425e516c89SStephen Hemminger rte_exit(EXIT_FAILURE, 47435e516c89SStephen Hemminger "EAL cleanup failed: %s\n", strerror(-ret)); 47445e516c89SStephen Hemminger 47455e516c89SStephen Hemminger return EXIT_SUCCESS; 4746af75078fSIntel } 4747