/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *   anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated directly. Set this flag to exit the stats-period loop after
 * SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if one of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RX_DESC_DEFAULT 0
#define TX_DESC_DEFAULT 0
uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint32_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Defaults to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 to disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
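
/*
 * In multi-process mode, device-global operations must be performed only
 * by the primary process; the *_mp() helpers below wrap the corresponding
 * ethdev calls with an is_proc_primary() check.
 */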
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}
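
/*
 * Update the recorded status of every slave port attached to the given
 * bonding port, since starting/stopping the bonding device also
 * starts/stops its slaves.
 */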
static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t slave_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_slaves;
	portid_t slave_pid;
	int i;

	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
					     RTE_MAX_ETHPORTS);
	if (num_slaves < 0) {
		fprintf(stderr, "Failed to get slave list for port = %u\n",
			bond_pid);
		return num_slaves;
	}

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		port = &ports[slave_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonded port also starts all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, false);
	}

	return 0;
}
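
/*
 * Counterpart of eth_dev_start_mp(): stop the port from the primary
 * process only and sync the recorded status of any bonding slaves.
 */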
static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonded port also stops all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, true);
	}

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flag names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is new (not yet discovered).
 * If yes, return positive value. If not, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
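
/*
 * The helpers below implement the "xmem"/"xmemhuge" mempool allocation
 * modes: estimate the required size, mmap() anonymous (huge)pages, record
 * the IOVA address of every page and expose the area as a malloc heap
 * named "extmem".
 */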
#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
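
/*
 * Allocate the external memory area and fill in its parameters: virtual
 * address, length, page size and the IOVA address of every page.
 */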
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
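
/*
 * Create the "extmem" heap if it does not exist yet, allocate the external
 * memory area and add it to the heap so that mempools can be created on it.
 */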
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
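
/*
 * Mempool per-chunk callbacks for anonymous memory: (un)register each
 * memory chunk with DPDK and DMA-(un)map it for every probed port.
 */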
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif
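
/*
 * Reserve IOVA-contiguous memzones and build the table of external buffer
 * descriptors used to create a mempool with pinned external buffers.
 * Returns the number of descriptors, or 0 on failure (with errno set).
 */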
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}

/*
 * Mbuf pool creation; done once per socket and mbuf size at
 * initialisation time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
	{
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
		break;
	}
Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1209c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1210c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1211c7f5dba7SAnatoly Burakov 12120e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 12130e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1214ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1215c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1216c7f5dba7SAnatoly Burakov heap_socket); 1217c7f5dba7SAnatoly Burakov break; 1218c7f5dba7SAnatoly Burakov } 1219761f7ae1SJie Zhou #endif 122072512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 122172512e18SViacheslav Ovsiienko { 122272512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 122372512e18SViacheslav Ovsiienko unsigned int ext_num; 122472512e18SViacheslav Ovsiienko 122572512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 122672512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 122772512e18SViacheslav Ovsiienko if (ext_num == 0) 122872512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 122972512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 123072512e18SViacheslav Ovsiienko 123172512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 123272512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 123372512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 123472512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 123572512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 123672512e18SViacheslav Ovsiienko ext_mem, ext_num); 123772512e18SViacheslav Ovsiienko free(ext_mem); 123872512e18SViacheslav Ovsiienko break; 123972512e18SViacheslav Ovsiienko } 1240c7f5dba7SAnatoly Burakov default: 1241c7f5dba7SAnatoly Burakov { 1242c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1243c7f5dba7SAnatoly Burakov } 1244bece7b6cSChristian Ehrhardt } 1245148f963fSBruce Richardson 1246761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 124724427bb9SOlivier Matz err: 1248761f7ae1SJie Zhou #endif 1249af75078fSIntel if (rte_mp == NULL) { 1250d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1251d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1252d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1253148f963fSBruce Richardson } else if (verbose_level > 0) { 1254591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1255af75078fSIntel } 1256401b744dSShahaf Shuler return rte_mp; 1257af75078fSIntel } 1258af75078fSIntel 125920a0286fSLiu Xiaofeng /* 126020a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 126120a0286fSLiu Xiaofeng * if valid, return 0, else return -1 126220a0286fSLiu Xiaofeng */ 126320a0286fSLiu Xiaofeng static int 126420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 126520a0286fSLiu Xiaofeng { 126620a0286fSLiu Xiaofeng static int warning_once = 0; 126720a0286fSLiu Xiaofeng 1268c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 126920a0286fSLiu Xiaofeng if (!warning_once && numa_support) 127061a3b0e5SAndrew Rybchenko fprintf(stderr, 127161a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 127220a0286fSLiu Xiaofeng warning_once = 1; 127320a0286fSLiu Xiaofeng return -1; 127420a0286fSLiu Xiaofeng } 127520a0286fSLiu Xiaofeng return 0; 127620a0286fSLiu 
Xiaofeng }
127720a0286fSLiu Xiaofeng
12783f7311baSWei Dai /*
12793f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12803f7311baSWei Dai  * *pid returns the port id which has the minimal value of
12813f7311baSWei Dai  * max_rx_queues among all ports.
12823f7311baSWei Dai  */
12833f7311baSWei Dai queueid_t
12843f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
12853f7311baSWei Dai {
12869e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
12876f51deb9SIvan Ilchenko bool max_rxq_valid = false;
12883f7311baSWei Dai portid_t pi;
12893f7311baSWei Dai struct rte_eth_dev_info dev_info;
12903f7311baSWei Dai
12913f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) {
12926f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12936f51deb9SIvan Ilchenko continue;
12946f51deb9SIvan Ilchenko
12956f51deb9SIvan Ilchenko max_rxq_valid = true;
12963f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) {
12973f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues;
12983f7311baSWei Dai *pid = pi;
12993f7311baSWei Dai }
13003f7311baSWei Dai }
13016f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0;
13023f7311baSWei Dai }
13033f7311baSWei Dai
13043f7311baSWei Dai /*
13053f7311baSWei Dai  * Check input rxq is valid or not.
13063f7311baSWei Dai  * If input rxq is not greater than the maximum number
13073f7311baSWei Dai  * of RX queues of any port, it is valid.
13083f7311baSWei Dai  * if valid, return 0, else return -1
13093f7311baSWei Dai  */
13103f7311baSWei Dai int
13113f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13123f7311baSWei Dai {
13133f7311baSWei Dai queueid_t allowed_max_rxq;
13143f7311baSWei Dai portid_t pid = 0;
13153f7311baSWei Dai
13163f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13173f7311baSWei Dai if (rxq > allowed_max_rxq) {
131861a3b0e5SAndrew Rybchenko fprintf(stderr,
131961a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
132061a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid);
13213f7311baSWei Dai return -1;
13223f7311baSWei Dai }
13233f7311baSWei Dai return 0;
13243f7311baSWei Dai }
13253f7311baSWei Dai
132636db4f6cSWei Dai /*
132736db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
132836db4f6cSWei Dai  * *pid returns the port id which has the minimal value of
132936db4f6cSWei Dai  * max_tx_queues among all ports.
133036db4f6cSWei Dai  */
133136db4f6cSWei Dai queueid_t
133236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
133336db4f6cSWei Dai {
13349e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13356f51deb9SIvan Ilchenko bool max_txq_valid = false;
133636db4f6cSWei Dai portid_t pi;
133736db4f6cSWei Dai struct rte_eth_dev_info dev_info;
133836db4f6cSWei Dai
133936db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) {
13406f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13416f51deb9SIvan Ilchenko continue;
13426f51deb9SIvan Ilchenko
13436f51deb9SIvan Ilchenko max_txq_valid = true;
134436db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) {
134536db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues;
134636db4f6cSWei Dai *pid = pi;
134736db4f6cSWei Dai }
134836db4f6cSWei Dai }
13496f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0;
135036db4f6cSWei Dai }
135136db4f6cSWei Dai
135236db4f6cSWei Dai /*
135336db4f6cSWei Dai  * Check input txq is valid or not.
135436db4f6cSWei Dai  * If input txq is not greater than the maximum number
135536db4f6cSWei Dai  * of TX queues of any port, it is valid.
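 * For example, with two ports reporting max_tx_queues of 16 and 8
 * (illustrative values), any txq up to 8 is accepted.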
135636db4f6cSWei Dai  * if valid, return 0, else return -1
135736db4f6cSWei Dai  */
135836db4f6cSWei Dai int
135936db4f6cSWei Dai check_nb_txq(queueid_t txq)
136036db4f6cSWei Dai {
136136db4f6cSWei Dai queueid_t allowed_max_txq;
136236db4f6cSWei Dai portid_t pid = 0;
136336db4f6cSWei Dai
136436db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid);
136536db4f6cSWei Dai if (txq > allowed_max_txq) {
136661a3b0e5SAndrew Rybchenko fprintf(stderr,
136761a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
136861a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid);
136936db4f6cSWei Dai return -1;
137036db4f6cSWei Dai }
137136db4f6cSWei Dai return 0;
137236db4f6cSWei Dai }
137336db4f6cSWei Dai
13741c69df45SOri Kam /*
137599e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
137699e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
137799e040d3SLijun Ou  * max_rxd across all queues of all ports.
137899e040d3SLijun Ou  */
137999e040d3SLijun Ou static uint16_t
138099e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
138199e040d3SLijun Ou {
138299e040d3SLijun Ou uint16_t allowed_max_rxd = UINT16_MAX;
138399e040d3SLijun Ou portid_t pi;
138499e040d3SLijun Ou struct rte_eth_dev_info dev_info;
138599e040d3SLijun Ou
138699e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
138799e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
138899e040d3SLijun Ou continue;
138999e040d3SLijun Ou
139099e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
139199e040d3SLijun Ou allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
139299e040d3SLijun Ou *pid = pi;
139399e040d3SLijun Ou }
139499e040d3SLijun Ou }
139599e040d3SLijun Ou return allowed_max_rxd;
139699e040d3SLijun Ou }
139799e040d3SLijun Ou
139899e040d3SLijun Ou /*
139999e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
140099e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
140199e040d3SLijun Ou  * min_rxd across all queues of all ports.
140299e040d3SLijun Ou  */
140399e040d3SLijun Ou static uint16_t
140499e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
140599e040d3SLijun Ou {
140699e040d3SLijun Ou uint16_t allowed_min_rxd = 0;
140799e040d3SLijun Ou portid_t pi;
140899e040d3SLijun Ou struct rte_eth_dev_info dev_info;
140999e040d3SLijun Ou
141099e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
141199e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141299e040d3SLijun Ou continue;
141399e040d3SLijun Ou
141499e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
141599e040d3SLijun Ou allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
141699e040d3SLijun Ou *pid = pi;
141799e040d3SLijun Ou }
141899e040d3SLijun Ou }
141999e040d3SLijun Ou
142099e040d3SLijun Ou return allowed_min_rxd;
142199e040d3SLijun Ou }
142299e040d3SLijun Ou
142399e040d3SLijun Ou /*
142499e040d3SLijun Ou  * Check input rxd is valid or not.
142599e040d3SLijun Ou  * If input rxd is not greater than the maximum number
142699e040d3SLijun Ou  * of RXDs of any Rx queue and is not less than the
142799e040d3SLijun Ou  * minimal number of RXDs of any Rx queue, it is valid.
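 * For example, if the tightest limits reported over all ports are
 * nb_min = 64 and nb_max = 4096 (illustrative rx_desc_lim values),
 * any rxd in the range [64, 4096] passes.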
142899e040d3SLijun Ou  * if valid, return 0, else return -1
142999e040d3SLijun Ou  */
143099e040d3SLijun Ou int
143199e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
143299e040d3SLijun Ou {
143399e040d3SLijun Ou uint16_t allowed_max_rxd;
143499e040d3SLijun Ou uint16_t allowed_min_rxd;
143599e040d3SLijun Ou portid_t pid = 0;
143699e040d3SLijun Ou
143799e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
143899e040d3SLijun Ou if (rxd > allowed_max_rxd) {
143961a3b0e5SAndrew Rybchenko fprintf(stderr,
144061a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
144161a3b0e5SAndrew Rybchenko rxd, allowed_max_rxd, pid);
144299e040d3SLijun Ou return -1;
144399e040d3SLijun Ou }
144499e040d3SLijun Ou
144599e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
144699e040d3SLijun Ou if (rxd < allowed_min_rxd) {
144761a3b0e5SAndrew Rybchenko fprintf(stderr,
144861a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
144961a3b0e5SAndrew Rybchenko rxd, allowed_min_rxd, pid);
145099e040d3SLijun Ou return -1;
145199e040d3SLijun Ou }
145299e040d3SLijun Ou
145399e040d3SLijun Ou return 0;
145499e040d3SLijun Ou }
145599e040d3SLijun Ou
145699e040d3SLijun Ou /*
145799e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
145899e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
145999e040d3SLijun Ou  * max_txd among all tx queues.
146099e040d3SLijun Ou  */
146199e040d3SLijun Ou static uint16_t
146299e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
146399e040d3SLijun Ou {
146499e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX;
146599e040d3SLijun Ou portid_t pi;
146699e040d3SLijun Ou struct rte_eth_dev_info dev_info;
146799e040d3SLijun Ou
146899e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
146999e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
147099e040d3SLijun Ou continue;
147199e040d3SLijun Ou
147299e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
147399e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max;
147499e040d3SLijun Ou *pid = pi;
147599e040d3SLijun Ou }
147699e040d3SLijun Ou }
147799e040d3SLijun Ou return allowed_max_txd;
147899e040d3SLijun Ou }
147999e040d3SLijun Ou
148099e040d3SLijun Ou /*
148199e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
148299e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
148399e040d3SLijun Ou  * min_txd among all tx queues.
148499e040d3SLijun Ou  */
148599e040d3SLijun Ou static uint16_t
148699e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
148799e040d3SLijun Ou {
148899e040d3SLijun Ou uint16_t allowed_min_txd = 0;
148999e040d3SLijun Ou portid_t pi;
149099e040d3SLijun Ou struct rte_eth_dev_info dev_info;
149199e040d3SLijun Ou
149299e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
149399e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149499e040d3SLijun Ou continue;
149599e040d3SLijun Ou
149699e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
149799e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min;
149899e040d3SLijun Ou *pid = pi;
149999e040d3SLijun Ou }
150099e040d3SLijun Ou }
150199e040d3SLijun Ou
150299e040d3SLijun Ou return allowed_min_txd;
150399e040d3SLijun Ou }
150499e040d3SLijun Ou
150599e040d3SLijun Ou /*
150699e040d3SLijun Ou  * Check input txd is valid or not.
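 * (Both bounds come from the tx_desc_lim limits each port reports via
 * rte_eth_dev_info_get(); the helpers above reduce them to the
 * tightest pair.)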
150799e040d3SLijun Ou  * If input txd is not greater than the maximum number of TXDs
150899e040d3SLijun Ou  * of any Tx queue and is not less than the minimal number of TXDs of any Tx queue, it is valid.
150999e040d3SLijun Ou  * if valid, return 0, else return -1
151099e040d3SLijun Ou  */
151199e040d3SLijun Ou int
151299e040d3SLijun Ou check_nb_txd(queueid_t txd)
151399e040d3SLijun Ou {
151499e040d3SLijun Ou uint16_t allowed_max_txd;
151599e040d3SLijun Ou uint16_t allowed_min_txd;
151699e040d3SLijun Ou portid_t pid = 0;
151799e040d3SLijun Ou
151899e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid);
151999e040d3SLijun Ou if (txd > allowed_max_txd) {
152061a3b0e5SAndrew Rybchenko fprintf(stderr,
152161a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
152261a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid);
152399e040d3SLijun Ou return -1;
152499e040d3SLijun Ou }
152599e040d3SLijun Ou
152699e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid);
152799e040d3SLijun Ou if (txd < allowed_min_txd) {
152861a3b0e5SAndrew Rybchenko fprintf(stderr,
152961a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
153061a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid);
153199e040d3SLijun Ou return -1;
153299e040d3SLijun Ou }
153399e040d3SLijun Ou return 0;
153499e040d3SLijun Ou }
153599e040d3SLijun Ou
153699e040d3SLijun Ou
153799e040d3SLijun Ou /*
15381c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15391c69df45SOri Kam  * *pid returns the port id which has the minimal value of
15401c69df45SOri Kam  * max_hairpin_queues among all ports.
15411c69df45SOri Kam  */
15421c69df45SOri Kam queueid_t
15431c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15441c69df45SOri Kam {
15459e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15461c69df45SOri Kam portid_t pi;
15471c69df45SOri Kam struct rte_eth_hairpin_cap cap;
15481c69df45SOri Kam
15491c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) {
15501c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15511c69df45SOri Kam *pid = pi;
15521c69df45SOri Kam return 0;
15531c69df45SOri Kam }
15541c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) {
15551c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues;
15561c69df45SOri Kam *pid = pi;
15571c69df45SOri Kam }
15581c69df45SOri Kam }
15591c69df45SOri Kam return allowed_max_hairpinq;
15601c69df45SOri Kam }
15611c69df45SOri Kam
15621c69df45SOri Kam /*
15631c69df45SOri Kam  * Check input hairpinq is valid or not.
15641c69df45SOri Kam  * If input hairpinq is not greater than the maximum number
15651c69df45SOri Kam  * of hairpin queues of any port, it is valid.
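 * A port for which rte_eth_dev_hairpin_capability_get() fails makes
 * the allowed maximum 0, so any nonzero hairpinq is then rejected.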
15661c69df45SOri Kam * if valid, return 0, else return -1 15671c69df45SOri Kam */ 15681c69df45SOri Kam int 15691c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15701c69df45SOri Kam { 15711c69df45SOri Kam queueid_t allowed_max_hairpinq; 15721c69df45SOri Kam portid_t pid = 0; 15731c69df45SOri Kam 15741c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15751c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 157661a3b0e5SAndrew Rybchenko fprintf(stderr, 157761a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 15781c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 15791c69df45SOri Kam return -1; 15801c69df45SOri Kam } 15811c69df45SOri Kam return 0; 15821c69df45SOri Kam } 15831c69df45SOri Kam 15841bb4a528SFerruh Yigit static int 15851bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 15861bb4a528SFerruh Yigit { 15871bb4a528SFerruh Yigit uint32_t eth_overhead; 15881bb4a528SFerruh Yigit 15891bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 15901bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 15911bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 15921bb4a528SFerruh Yigit else 15931bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 15941bb4a528SFerruh Yigit 15951bb4a528SFerruh Yigit return eth_overhead; 15961bb4a528SFerruh Yigit } 15971bb4a528SFerruh Yigit 1598af75078fSIntel static void 1599b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1600b6b8a1ebSViacheslav Ovsiienko { 1601b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1602b6b8a1ebSViacheslav Ovsiienko int ret; 1603b6b8a1ebSViacheslav Ovsiienko int i; 1604b6b8a1ebSViacheslav Ovsiienko 1605f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 1606f6d8a6d3SIvan Malov 1607b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1608b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1609b6b8a1ebSViacheslav Ovsiienko 1610b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1611b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1612b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1613b6b8a1ebSViacheslav Ovsiienko 1614295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1615b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1616295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1617b6b8a1ebSViacheslav Ovsiienko 1618b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1619b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 16203c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads; 1621b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1622b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 16233c4426dbSDmitry Kozlyuk port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 1624b6b8a1ebSViacheslav Ovsiienko 1625b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1626b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1627b6b8a1ebSViacheslav Ovsiienko 16281bb4a528SFerruh Yigit if (max_rx_pkt_len) 16291bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16301bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 16311bb4a528SFerruh Yigit 1632b6b8a1ebSViacheslav Ovsiienko /* set flag to 
initialize port/queue */ 1633b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1634b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1635b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1636b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1637b6b8a1ebSViacheslav Ovsiienko 1638b6b8a1ebSViacheslav Ovsiienko /* 1639b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1640b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1641b6b8a1ebSViacheslav Ovsiienko */ 1642b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1643b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16441bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16451bb4a528SFerruh Yigit uint16_t mtu; 1646b6b8a1ebSViacheslav Ovsiienko 16471bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16481bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16491bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16501bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16511bb4a528SFerruh Yigit 16521bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16531bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1654b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1655b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1656b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1657b6b8a1ebSViacheslav Ovsiienko } 1658b6b8a1ebSViacheslav Ovsiienko } 1659b6b8a1ebSViacheslav Ovsiienko } 16601bb4a528SFerruh Yigit } 1661b6b8a1ebSViacheslav Ovsiienko 1662b6b8a1ebSViacheslav Ovsiienko static void 1663af75078fSIntel init_config(void) 1664af75078fSIntel { 1665ce8d5614SIntel portid_t pid; 1666af75078fSIntel struct rte_mempool *mbp; 1667af75078fSIntel unsigned int nb_mbuf_per_pool; 1668af75078fSIntel lcoreid_t lc_id; 16696970401eSDavid Marchand #ifdef RTE_LIB_GRO 1670b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16716970401eSDavid Marchand #endif 16726970401eSDavid Marchand #ifdef RTE_LIB_GSO 167352f38a20SJiayu Hu uint32_t gso_types; 16746970401eSDavid Marchand #endif 1675487f9a59SYulong Pei 1676af75078fSIntel /* Configuration of logical cores. 
*/ 1677af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1678af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1679fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1680af75078fSIntel if (fwd_lcores == NULL) { 1681ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1682ce8d5614SIntel "failed\n", nb_lcores); 1683af75078fSIntel } 1684af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1685af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1686af75078fSIntel sizeof(struct fwd_lcore), 1687fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1688af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1689ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1690ce8d5614SIntel "failed\n"); 1691af75078fSIntel } 1692af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1693af75078fSIntel } 1694af75078fSIntel 16957d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1696b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 16976f51deb9SIvan Ilchenko 1698b6ea6408SIntel if (numa_support) { 1699b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1700b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1701b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 170220a0286fSLiu Xiaofeng 170329841336SPhil Yang /* 170429841336SPhil Yang * if socket_id is invalid, 170529841336SPhil Yang * set to the first available socket. 170629841336SPhil Yang */ 170720a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 170829841336SPhil Yang socket_id = socket_ids[0]; 1709b6ea6408SIntel } 1710b6b8a1ebSViacheslav Ovsiienko } else { 1711b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1712b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1713b6ea6408SIntel } 1714b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1715b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1716ce8d5614SIntel } 17173ab64341SOlivier Matz /* 17183ab64341SOlivier Matz * Create pools of mbuf. 17193ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 17203ab64341SOlivier Matz * socket 0 memory by default. 17213ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 17223ab64341SOlivier Matz * 17233ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 17243ab64341SOlivier Matz * nb_txd can be configured at run time. 
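 *
 * As a worked sizing example (illustrative values, not the build
 * defaults): with RX_DESC_MAX = 2048, TX_DESC_MAX = 2048, 8 lcores,
 * mb_mempool_cache = 250 and MAX_PKT_BURST = 512, the computation
 * below gives
 *   nb_mbuf_per_pool = 2048 + (8 * 250) + 2048 + 512 = 6608
 * which is then multiplied by RTE_MAX_ETHPORTS.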
17253ab64341SOlivier Matz */ 17263ab64341SOlivier Matz if (param_total_num_mbufs) 17273ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17283ab64341SOlivier Matz else { 17294ed89049SDavid Marchand nb_mbuf_per_pool = RX_DESC_MAX + 17303ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17314ed89049SDavid Marchand TX_DESC_MAX + MAX_PKT_BURST; 17323ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17333ab64341SOlivier Matz } 17343ab64341SOlivier Matz 1735b6ea6408SIntel if (numa_support) { 173626cbb419SViacheslav Ovsiienko uint8_t i, j; 1737ce8d5614SIntel 1738c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 173926cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 174026cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 174126cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1742401b744dSShahaf Shuler nb_mbuf_per_pool, 174326cbb419SViacheslav Ovsiienko socket_ids[i], j); 17443ab64341SOlivier Matz } else { 174526cbb419SViacheslav Ovsiienko uint8_t i; 174626cbb419SViacheslav Ovsiienko 174726cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 174826cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 174926cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1750401b744dSShahaf Shuler nb_mbuf_per_pool, 175126cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 175226cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17533ab64341SOlivier Matz } 1754b6ea6408SIntel 1755b6ea6408SIntel init_port_config(); 17565886ae07SAdrien Mazarguil 17576970401eSDavid Marchand #ifdef RTE_LIB_GSO 1758295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1759295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17606970401eSDavid Marchand #endif 17615886ae07SAdrien Mazarguil /* 17625886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
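 * Each lcore prefers the pool on its own NUMA socket and falls back
 * to the socket 0 pool when no local pool exists.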
17635886ae07SAdrien Mazarguil */ 17645886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17658fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 176626cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17678fd8bebcSAdrien Mazarguil 17685886ae07SAdrien Mazarguil if (mbp == NULL) 176926cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17705886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17716970401eSDavid Marchand #ifdef RTE_LIB_GSO 177252f38a20SJiayu Hu /* initialize GSO context */ 177352f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 177452f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 177552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 177635b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 177735b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 177852f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 17796970401eSDavid Marchand #endif 17805886ae07SAdrien Mazarguil } 17815886ae07SAdrien Mazarguil 17820c0db76fSBernard Iremonger fwd_config_setup(); 1783b7091f1dSJiayu Hu 17846970401eSDavid Marchand #ifdef RTE_LIB_GRO 1785b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1786b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1787b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1788b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1789b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1790b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1791b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1792b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1793b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1794b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1795b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1796b7091f1dSJiayu Hu } 1797b7091f1dSJiayu Hu } 17986970401eSDavid Marchand #endif 1799ce8d5614SIntel } 1800ce8d5614SIntel 18012950a769SDeclan Doherty 18022950a769SDeclan Doherty void 1803a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 18042950a769SDeclan Doherty { 18052950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
*/ 1806b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 18072950a769SDeclan Doherty init_port_config(); 18082950a769SDeclan Doherty } 18092950a769SDeclan Doherty 1810ce8d5614SIntel int 1811ce8d5614SIntel init_fwd_streams(void) 1812ce8d5614SIntel { 1813ce8d5614SIntel portid_t pid; 1814ce8d5614SIntel struct rte_port *port; 1815ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 18165a8fb55cSReshma Pattan queueid_t q; 1817ce8d5614SIntel 1818ce8d5614SIntel /* set socket id according to numa or not */ 18197d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1820ce8d5614SIntel port = &ports[pid]; 1821ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 182261a3b0e5SAndrew Rybchenko fprintf(stderr, 182361a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 182461a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1825ce8d5614SIntel return -1; 1826ce8d5614SIntel } 1827ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 182861a3b0e5SAndrew Rybchenko fprintf(stderr, 182961a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 183061a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1831ce8d5614SIntel return -1; 1832ce8d5614SIntel } 183320a0286fSLiu Xiaofeng if (numa_support) { 183420a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 183520a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 183620a0286fSLiu Xiaofeng else { 1837b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 183820a0286fSLiu Xiaofeng 183929841336SPhil Yang /* 184029841336SPhil Yang * if socket_id is invalid, 184129841336SPhil Yang * set to the first available socket. 184229841336SPhil Yang */ 184320a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 184429841336SPhil Yang port->socket_id = socket_ids[0]; 184520a0286fSLiu Xiaofeng } 184620a0286fSLiu Xiaofeng } 1847b6ea6408SIntel else { 1848b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1849af75078fSIntel port->socket_id = 0; 1850b6ea6408SIntel else 1851b6ea6408SIntel port->socket_id = socket_num; 1852b6ea6408SIntel } 1853af75078fSIntel } 1854af75078fSIntel 18555a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18565a8fb55cSReshma Pattan if (q == 0) { 185761a3b0e5SAndrew Rybchenko fprintf(stderr, 185861a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18595a8fb55cSReshma Pattan return -1; 18605a8fb55cSReshma Pattan } 18615a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1862ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1863ce8d5614SIntel return 0; 1864ce8d5614SIntel /* clear the old */ 1865ce8d5614SIntel if (fwd_streams != NULL) { 1866ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1867ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1868ce8d5614SIntel continue; 1869ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1870ce8d5614SIntel fwd_streams[sm_id] = NULL; 1871af75078fSIntel } 1872ce8d5614SIntel rte_free(fwd_streams); 1873ce8d5614SIntel fwd_streams = NULL; 1874ce8d5614SIntel } 1875ce8d5614SIntel 1876ce8d5614SIntel /* init new */ 1877ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 18781f84c469SMatan Azrad if (nb_fwd_streams) { 1879ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 18801f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 18811f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1882ce8d5614SIntel if (fwd_streams == NULL) 18831f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 18841f84c469SMatan Azrad " 
(struct fwd_stream *)) failed\n", 18851f84c469SMatan Azrad nb_fwd_streams); 1886ce8d5614SIntel 1887af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 18881f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 18891f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 18901f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1891ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 18921f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 18931f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 18941f84c469SMatan Azrad } 1895af75078fSIntel } 1896ce8d5614SIntel 1897ce8d5614SIntel return 0; 1898af75078fSIntel } 1899af75078fSIntel 1900af75078fSIntel static void 1901af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1902af75078fSIntel { 19037569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 190485de481aSHonnappa Nagarahalli uint64_t nb_burst; 19057569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 19067569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1907af75078fSIntel uint16_t nb_pkt; 19087569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 19097569b8c1SHonnappa Nagarahalli int i; 1910af75078fSIntel 1911af75078fSIntel /* 1912af75078fSIntel * First compute the total number of packet bursts and the 1913af75078fSIntel * two highest numbers of bursts of the same number of packets. 1914af75078fSIntel */ 19157569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 19167569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 19177569b8c1SHonnappa Nagarahalli 19187569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 19197569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 19207569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 19217569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 19227569b8c1SHonnappa Nagarahalli 19237569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. 
*/ 19246a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1925af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 19267569b8c1SHonnappa Nagarahalli 1927af75078fSIntel if (nb_burst == 0) 1928af75078fSIntel continue; 19297569b8c1SHonnappa Nagarahalli 1930af75078fSIntel total_burst += nb_burst; 19317569b8c1SHonnappa Nagarahalli 19327569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19337569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19347569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1935fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1936fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19377569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19387569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19397569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1940af75078fSIntel } 1941af75078fSIntel } 1942af75078fSIntel if (total_burst == 0) 1943af75078fSIntel return; 19447569b8c1SHonnappa Nagarahalli 19457569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19467569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19477569b8c1SHonnappa Nagarahalli if (i == 3) { 19487569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1949af75078fSIntel return; 1950af75078fSIntel } 19517569b8c1SHonnappa Nagarahalli 19527569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19537569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19547569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19557569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1956af75078fSIntel return; 1957af75078fSIntel } 19587569b8c1SHonnappa Nagarahalli 19597569b8c1SHonnappa Nagarahalli burst_percent[i] = 19607569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19617569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19627569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19637569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1964af75078fSIntel } 1965af75078fSIntel } 1966af75078fSIntel 1967af75078fSIntel static void 1968af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1969af75078fSIntel { 1970af75078fSIntel struct fwd_stream *fs; 1971af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1972af75078fSIntel 1973af75078fSIntel fs = fwd_streams[stream_id]; 1974af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1975af75078fSIntel (fs->fwd_dropped == 0)) 1976af75078fSIntel return; 1977af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1978af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1979af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1980af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1981c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1982c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1983af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1984af75078fSIntel 1985af75078fSIntel /* if checksum mode */ 1986af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1987c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1988c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1989c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 199058d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 199158d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 1992d139cf23SLance Richardson printf(" RX- bad outer IP checksum: 
%-14"PRIu64"\n", 1993d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 199494d65546SDavid Marchand } else { 199594d65546SDavid Marchand printf("\n"); 1996af75078fSIntel } 1997af75078fSIntel 19980e4b1963SDharmik Thakkar if (record_burst_stats) { 1999af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 2000af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 20010e4b1963SDharmik Thakkar } 2002af75078fSIntel } 2003af75078fSIntel 200453324971SDavid Marchand void 200553324971SDavid Marchand fwd_stats_display(void) 200653324971SDavid Marchand { 200753324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 200853324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 200953324971SDavid Marchand struct { 201053324971SDavid Marchand struct fwd_stream *rx_stream; 201153324971SDavid Marchand struct fwd_stream *tx_stream; 201253324971SDavid Marchand uint64_t tx_dropped; 201353324971SDavid Marchand uint64_t rx_bad_ip_csum; 201453324971SDavid Marchand uint64_t rx_bad_l4_csum; 201553324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 2016d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 201753324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 201853324971SDavid Marchand uint64_t total_rx_dropped = 0; 201953324971SDavid Marchand uint64_t total_tx_dropped = 0; 202053324971SDavid Marchand uint64_t total_rx_nombuf = 0; 202153324971SDavid Marchand struct rte_eth_stats stats; 202253324971SDavid Marchand uint64_t fwd_cycles = 0; 202353324971SDavid Marchand uint64_t total_recv = 0; 202453324971SDavid Marchand uint64_t total_xmit = 0; 202553324971SDavid Marchand struct rte_port *port; 202653324971SDavid Marchand streamid_t sm_id; 202753324971SDavid Marchand portid_t pt_id; 2028baef6bbfSMin Hu (Connor) int ret; 202953324971SDavid Marchand int i; 203053324971SDavid Marchand 203153324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 203253324971SDavid Marchand 203353324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 203453324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 203553324971SDavid Marchand 203653324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 203753324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 203853324971SDavid Marchand fwd_stream_stats_display(sm_id); 203953324971SDavid Marchand } else { 204053324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 204153324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 204253324971SDavid Marchand } 204353324971SDavid Marchand 204453324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 204553324971SDavid Marchand 204653324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 204753324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 204853324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 204953324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2050d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2051d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 205253324971SDavid Marchand 2053bc700b67SDharmik Thakkar if (record_core_cycles) 205453324971SDavid Marchand fwd_cycles += fs->core_cycles; 205553324971SDavid Marchand } 205653324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 205753324971SDavid Marchand pt_id = fwd_ports_ids[i]; 205853324971SDavid Marchand port = &ports[pt_id]; 205953324971SDavid Marchand 2060baef6bbfSMin Hu 
(Connor) ret = rte_eth_stats_get(pt_id, &stats); 2061baef6bbfSMin Hu (Connor) if (ret != 0) { 2062baef6bbfSMin Hu (Connor) fprintf(stderr, 2063baef6bbfSMin Hu (Connor) "%s: Error: failed to get stats (port %u): %d", 2064baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 2065baef6bbfSMin Hu (Connor) continue; 2066baef6bbfSMin Hu (Connor) } 206753324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 206853324971SDavid Marchand stats.opackets -= port->stats.opackets; 206953324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 207053324971SDavid Marchand stats.obytes -= port->stats.obytes; 207153324971SDavid Marchand stats.imissed -= port->stats.imissed; 207253324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 207353324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 207453324971SDavid Marchand 207553324971SDavid Marchand total_recv += stats.ipackets; 207653324971SDavid Marchand total_xmit += stats.opackets; 207753324971SDavid Marchand total_rx_dropped += stats.imissed; 207853324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 207953324971SDavid Marchand total_tx_dropped += stats.oerrors; 208053324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 208153324971SDavid Marchand 208253324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 208353324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 208453324971SDavid Marchand 208508dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 208608dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 208753324971SDavid Marchand stats.ipackets + stats.imissed); 208853324971SDavid Marchand 2089d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 209053324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 209153324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 209253324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 209353324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 209453324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 209553324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2096d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2097d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2098d139cf23SLance Richardson } 209953324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 210008dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 210108dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 210253324971SDavid Marchand } 210353324971SDavid Marchand 210408dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 210553324971SDavid Marchand "TX-total: %-"PRIu64"\n", 210653324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 210753324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 210853324971SDavid Marchand 21090e4b1963SDharmik Thakkar if (record_burst_stats) { 211053324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 211153324971SDavid Marchand pkt_burst_stats_display("RX", 211253324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 211353324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 211453324971SDavid Marchand pkt_burst_stats_display("TX", 211553324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 21160e4b1963SDharmik Thakkar } 211753324971SDavid Marchand 211853324971SDavid Marchand printf(" %s--------------------------------%s\n", 211953324971SDavid Marchand fwd_stats_border, fwd_stats_border); 
212053324971SDavid Marchand } 212153324971SDavid Marchand 212253324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 212353324971SDavid Marchand "%s\n", 212453324971SDavid Marchand acc_stats_border, acc_stats_border); 212553324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 212653324971SDavid Marchand "%-"PRIu64"\n" 212753324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 212853324971SDavid Marchand "%-"PRIu64"\n", 212953324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 213053324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 213153324971SDavid Marchand if (total_rx_nombuf > 0) 213253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 213353324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 213453324971SDavid Marchand "%s\n", 213553324971SDavid Marchand acc_stats_border, acc_stats_border); 2136bc700b67SDharmik Thakkar if (record_core_cycles) { 21374c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21383a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21393a164e00SPhil Yang uint64_t total_pkts = 0; 21403a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21413a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21423a164e00SPhil Yang total_pkts = total_xmit; 21433a164e00SPhil Yang else 21443a164e00SPhil Yang total_pkts = total_recv; 21453a164e00SPhil Yang 21461920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 21473a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21484c0497b1SDharmik Thakkar " MHz Clock\n", 21493a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21503a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21514c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21523a164e00SPhil Yang } 2153bc700b67SDharmik Thakkar } 215453324971SDavid Marchand } 215553324971SDavid Marchand 215653324971SDavid Marchand void 215753324971SDavid Marchand fwd_stats_reset(void) 215853324971SDavid Marchand { 215953324971SDavid Marchand streamid_t sm_id; 216053324971SDavid Marchand portid_t pt_id; 2161baef6bbfSMin Hu (Connor) int ret; 216253324971SDavid Marchand int i; 216353324971SDavid Marchand 216453324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 216553324971SDavid Marchand pt_id = fwd_ports_ids[i]; 2166baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats); 2167baef6bbfSMin Hu (Connor) if (ret != 0) 2168baef6bbfSMin Hu (Connor) fprintf(stderr, 2169baef6bbfSMin Hu (Connor) "%s: Error: failed to clear stats (port %u):%d", 2170baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 217153324971SDavid Marchand } 217253324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 217353324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 217453324971SDavid Marchand 217553324971SDavid Marchand fs->rx_packets = 0; 217653324971SDavid Marchand fs->tx_packets = 0; 217753324971SDavid Marchand fs->fwd_dropped = 0; 217853324971SDavid Marchand fs->rx_bad_ip_csum = 0; 217953324971SDavid Marchand fs->rx_bad_l4_csum = 0; 218053324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2181d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 218253324971SDavid Marchand 218353324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 218453324971SDavid Marchand 
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
218553324971SDavid Marchand fs->core_cycles = 0;
218653324971SDavid Marchand }
218753324971SDavid Marchand }
218853324971SDavid Marchand
2189af75078fSIntel static void
21907741e4cfSIntel flush_fwd_rx_queues(void)
2191af75078fSIntel {
2192af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2193af75078fSIntel portid_t rxp;
21947741e4cfSIntel portid_t port_id;
2195af75078fSIntel queueid_t rxq;
2196af75078fSIntel uint16_t nb_rx;
2197af75078fSIntel uint16_t i;
2198af75078fSIntel uint8_t j;
2199f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2200594302c7SJames Poole uint64_t timer_period;
2201f487715fSReshma Pattan
2202a550baf2SMin Hu (Connor) if (num_procs > 1) {
2203a550baf2SMin Hu (Connor) printf("multi-process does not support flushing fwd Rx queues, skipping\n");
2204a550baf2SMin Hu (Connor) return;
2205a550baf2SMin Hu (Connor) }
2206a550baf2SMin Hu (Connor)
2207f487715fSReshma Pattan /* convert to number of cycles */
2208594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */
2209af75078fSIntel
2210af75078fSIntel for (j = 0; j < 2; j++) {
22117741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2212af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
22137741e4cfSIntel port_id = fwd_ports_ids[rxp];
22143c4426dbSDmitry Kozlyuk
22153c4426dbSDmitry Kozlyuk /* Polling stopped queues is prohibited. */
22163c4426dbSDmitry Kozlyuk if (ports[port_id].rxq[rxq].state ==
22173c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED)
22183c4426dbSDmitry Kozlyuk continue;
22193c4426dbSDmitry Kozlyuk
2220f487715fSReshma Pattan /**
2221f487715fSReshma Pattan  * testpmd can get stuck in the do-while loop below
2222f487715fSReshma Pattan  * if rte_eth_rx_burst() keeps returning nonzero
2223f487715fSReshma Pattan  * packet counts, so a timer is used to exit the
2224f487715fSReshma Pattan  * loop after a one-second expiry.
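 * (rte_get_timer_hz() returns timer cycles per second, so comparing
 * the accumulated rte_rdtsc() deltas against timer_period implements
 * the one-second cut-off.)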
2225f487715fSReshma Pattan */ 2226f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 2227af75078fSIntel do { 22287741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 2229013af9b6SIntel pkts_burst, MAX_PKT_BURST); 2230af75078fSIntel for (i = 0; i < nb_rx; i++) 2231af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 2232f487715fSReshma Pattan 2233f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 2234f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 2235f487715fSReshma Pattan timer_tsc += diff_tsc; 2236f487715fSReshma Pattan } while ((nb_rx > 0) && 2237f487715fSReshma Pattan (timer_tsc < timer_period)); 2238f487715fSReshma Pattan timer_tsc = 0; 2239af75078fSIntel } 2240af75078fSIntel } 2241af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 2242af75078fSIntel } 2243af75078fSIntel } 2244af75078fSIntel 2245af75078fSIntel static void 2246af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 2247af75078fSIntel { 2248af75078fSIntel struct fwd_stream **fsm; 2249af75078fSIntel streamid_t nb_fs; 2250af75078fSIntel streamid_t sm_id; 2251a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 22527e4441c8SRemy Horton uint64_t tics_per_1sec; 22537e4441c8SRemy Horton uint64_t tics_datum; 22547e4441c8SRemy Horton uint64_t tics_current; 22554918a357SXiaoyun Li uint16_t i, cnt_ports; 2256af75078fSIntel 22574918a357SXiaoyun Li cnt_ports = nb_ports; 22587e4441c8SRemy Horton tics_datum = rte_rdtsc(); 22597e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 22607e4441c8SRemy Horton #endif 2261af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 2262af75078fSIntel nb_fs = fc->stream_nb; 2263af75078fSIntel do { 2264af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 22653c4426dbSDmitry Kozlyuk if (!fsm[sm_id]->disabled) 2266af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 2267a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 2268e25e6c70SRemy Horton if (bitrate_enabled != 0 && 2269e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 22707e4441c8SRemy Horton tics_current = rte_rdtsc(); 22717e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 22727e4441c8SRemy Horton /* Periodic bitrate calculation */ 22734918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2274e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 22754918a357SXiaoyun Li ports_ids[i]); 22767e4441c8SRemy Horton tics_datum = tics_current; 22777e4441c8SRemy Horton } 2278e25e6c70SRemy Horton } 22797e4441c8SRemy Horton #endif 2280a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 228165eb1e54SPablo de Lara if (latencystats_enabled != 0 && 228265eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 228362d3216dSReshma Pattan rte_latencystats_update(); 228462d3216dSReshma Pattan #endif 228562d3216dSReshma Pattan 2286af75078fSIntel } while (! fc->stopped); 2287af75078fSIntel } 2288af75078fSIntel 2289af75078fSIntel static int 2290af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2291af75078fSIntel { 2292af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2293af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2294af75078fSIntel return 0; 2295af75078fSIntel } 2296af75078fSIntel 2297af75078fSIntel /* 2298af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2299af75078fSIntel * Used to start communication flows in network loopback test configurations. 
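 * (The single burst comes from copying the fwd_lcore and setting
 * stopped = 1 on the copy, so the do-while loop in
 * run_pkt_fwd_on_lcore() executes exactly once.)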
2300af75078fSIntel */ 2301af75078fSIntel static int 2302af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2303af75078fSIntel { 2304af75078fSIntel struct fwd_lcore *fwd_lc; 2305af75078fSIntel struct fwd_lcore tmp_lcore; 2306af75078fSIntel 2307af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2308af75078fSIntel tmp_lcore = *fwd_lc; 2309af75078fSIntel tmp_lcore.stopped = 1; 2310af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2311af75078fSIntel return 0; 2312af75078fSIntel } 2313af75078fSIntel 2314af75078fSIntel /* 2315af75078fSIntel * Launch packet forwarding: 2316af75078fSIntel * - Setup per-port forwarding context. 2317af75078fSIntel * - launch logical cores with their forwarding configuration. 2318af75078fSIntel */ 2319af75078fSIntel static void 2320af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2321af75078fSIntel { 2322af75078fSIntel unsigned int i; 2323af75078fSIntel unsigned int lc_id; 2324af75078fSIntel int diag; 2325af75078fSIntel 2326af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2327af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2328af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2329af75078fSIntel fwd_lcores[i]->stopped = 0; 2330af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2331af75078fSIntel fwd_lcores[i], lc_id); 2332af75078fSIntel if (diag != 0) 233361a3b0e5SAndrew Rybchenko fprintf(stderr, 233461a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2335af75078fSIntel lc_id, diag); 2336af75078fSIntel } 2337af75078fSIntel } 2338af75078fSIntel } 2339af75078fSIntel 2340af75078fSIntel /* 2341af75078fSIntel * Launch packet forwarding configuration. 2342af75078fSIntel */ 2343af75078fSIntel void 2344af75078fSIntel start_packet_forwarding(int with_tx_first) 2345af75078fSIntel { 2346af75078fSIntel port_fwd_begin_t port_fwd_begin; 2347af75078fSIntel port_fwd_end_t port_fwd_end; 23483c4426dbSDmitry Kozlyuk stream_init_t stream_init = cur_fwd_eng->stream_init; 2349af75078fSIntel unsigned int i; 2350af75078fSIntel 23515a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 23525a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 23535a8fb55cSReshma Pattan 23545a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 23555a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 23565a8fb55cSReshma Pattan 23575a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 23585a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 23595a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 23605a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 23615a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 23625a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 23635a8fb55cSReshma Pattan 2364ce8d5614SIntel if (all_ports_started() == 0) { 236561a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2366ce8d5614SIntel return; 2367ce8d5614SIntel } 2368af75078fSIntel if (test_done == 0) { 236961a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2370af75078fSIntel return; 2371af75078fSIntel } 23727741e4cfSIntel 237347a767b2SMatan Azrad fwd_config_setup(); 237447a767b2SMatan Azrad 237565744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 237665744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 237765744833SXueming Li return; 237865744833SXueming 
Li 23793c4426dbSDmitry Kozlyuk if (stream_init != NULL) 23803c4426dbSDmitry Kozlyuk for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) 23813c4426dbSDmitry Kozlyuk stream_init(fwd_streams[i]); 23823c4426dbSDmitry Kozlyuk 2383a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2384a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2385a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2386a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2387a78040c9SAlvin Zhang fprintf(stderr, 2388a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2389a78040c9SAlvin Zhang return; 2390a78040c9SAlvin Zhang } 2391a78040c9SAlvin Zhang } 2392a78040c9SAlvin Zhang } 2393a78040c9SAlvin Zhang 2394a78040c9SAlvin Zhang if (with_tx_first) { 2395a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2396a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2397a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2398a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2399a78040c9SAlvin Zhang fprintf(stderr, 2400a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2401a78040c9SAlvin Zhang return; 2402a78040c9SAlvin Zhang } 2403a78040c9SAlvin Zhang } 2404a78040c9SAlvin Zhang } 2405a78040c9SAlvin Zhang } 2406a78040c9SAlvin Zhang 2407a78040c9SAlvin Zhang test_done = 0; 2408a78040c9SAlvin Zhang 24097741e4cfSIntel if(!no_flush_rx) 24107741e4cfSIntel flush_fwd_rx_queues(); 24117741e4cfSIntel 2412af75078fSIntel rxtx_config_display(); 2413af75078fSIntel 241453324971SDavid Marchand fwd_stats_reset(); 2415af75078fSIntel if (with_tx_first) { 2416acbf77a6SZhihong Wang while (with_tx_first--) { 2417acbf77a6SZhihong Wang launch_packet_forwarding( 2418acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2419af75078fSIntel rte_eal_mp_wait_lcore(); 2420acbf77a6SZhihong Wang } 2421af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2422af75078fSIntel if (port_fwd_end != NULL) { 2423af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2424af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2425af75078fSIntel } 2426af75078fSIntel } 2427af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2428af75078fSIntel } 2429af75078fSIntel 2430af75078fSIntel void 2431af75078fSIntel stop_packet_forwarding(void) 2432af75078fSIntel { 2433af75078fSIntel port_fwd_end_t port_fwd_end; 2434af75078fSIntel lcoreid_t lc_id; 243553324971SDavid Marchand portid_t pt_id; 243653324971SDavid Marchand int i; 2437af75078fSIntel 2438af75078fSIntel if (test_done) { 243961a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2440af75078fSIntel return; 2441af75078fSIntel } 2442af75078fSIntel printf("Telling cores to stop..."); 2443af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2444af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2445af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2446af75078fSIntel rte_eal_mp_wait_lcore(); 2447af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2448af75078fSIntel if (port_fwd_end != NULL) { 2449af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2450af75078fSIntel pt_id = fwd_ports_ids[i]; 2451af75078fSIntel (*port_fwd_end)(pt_id); 2452af75078fSIntel } 2453af75078fSIntel } 2454c185d42cSDavid Marchand 245553324971SDavid Marchand fwd_stats_display(); 245658d475b7SJerin Jacob 2457af75078fSIntel printf("\nDone.\n"); 2458af75078fSIntel test_done = 1; 2459af75078fSIntel } 2460af75078fSIntel 2461cfae07fdSOuyang 
Changchun void
2462cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2463cfae07fdSOuyang Changchun {
2464492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
246561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up failed.\n");
2466cfae07fdSOuyang Changchun }
2467cfae07fdSOuyang Changchun 
2468cfae07fdSOuyang Changchun void
2469cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2470cfae07fdSOuyang Changchun {
2471492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
247261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down failed.\n");
2473cfae07fdSOuyang Changchun }
2474cfae07fdSOuyang Changchun 
2475ce8d5614SIntel static int
2476ce8d5614SIntel all_ports_started(void)
2477ce8d5614SIntel {
2478ce8d5614SIntel 	portid_t pi;
2479ce8d5614SIntel 	struct rte_port *port;
2480ce8d5614SIntel 
24817d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2482ce8d5614SIntel 		port = &ports[pi];
2483ce8d5614SIntel 		/* Check if there is a port which is not started */
248441b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
248541b05095SBernard Iremonger 			(port->slave_flag == 0))
2486ce8d5614SIntel 			return 0;
2487ce8d5614SIntel 	}
2488ce8d5614SIntel 
2489ce8d5614SIntel 	/* All ports (excluding bonding slaves) are started */
2490ce8d5614SIntel 	return 1;
2491ce8d5614SIntel }
2492ce8d5614SIntel 
2493148f963fSBruce Richardson int
24946018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
24956018eb8cSShahaf Shuler {
24966018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
24976018eb8cSShahaf Shuler 
24986018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
24996018eb8cSShahaf Shuler 		(port->slave_flag == 0))
25006018eb8cSShahaf Shuler 		return 0;
25016018eb8cSShahaf Shuler 	return 1;
25026018eb8cSShahaf Shuler }
25036018eb8cSShahaf Shuler 
25046018eb8cSShahaf Shuler int
2505edab33b1STetsuya Mukawa all_ports_stopped(void)
2506edab33b1STetsuya Mukawa {
2507edab33b1STetsuya Mukawa 	portid_t pi;
2508edab33b1STetsuya Mukawa 
25097d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
25106018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2511edab33b1STetsuya Mukawa 			return 0;
2512edab33b1STetsuya Mukawa 	}
2513edab33b1STetsuya Mukawa 
2514edab33b1STetsuya Mukawa 	return 1;
2515edab33b1STetsuya Mukawa }
2516edab33b1STetsuya Mukawa 
2517edab33b1STetsuya Mukawa int
2518edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2519edab33b1STetsuya Mukawa {
2520edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2521edab33b1STetsuya Mukawa 		return 0;
2522edab33b1STetsuya Mukawa 
2523edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2524edab33b1STetsuya Mukawa 		return 0;
2525edab33b1STetsuya Mukawa 
2526edab33b1STetsuya Mukawa 	return 1;
2527edab33b1STetsuya Mukawa }
2528edab33b1STetsuya Mukawa 
252923095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY	RTE_BIT32(8)
253023095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY	RTE_BIT32(9)
253123095155SDariusz Sosnowski 
253223095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY	RTE_BIT32(12)
253323095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY	RTE_BIT32(13)
253423095155SDariusz Sosnowski 
253523095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY	RTE_BIT32(16)
253623095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY	RTE_BIT32(17)
253723095155SDariusz Sosnowski 
253823095155SDariusz Sosnowski 
25391c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. 
*/ 25401c69df45SOri Kam static int 254101817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 25421c69df45SOri Kam { 25431c69df45SOri Kam queueid_t qi; 25441c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 25451c69df45SOri Kam .peer_count = 1, 25461c69df45SOri Kam }; 25471c69df45SOri Kam int i; 25481c69df45SOri Kam int diag; 25491c69df45SOri Kam struct rte_port *port = &ports[pi]; 255001817b10SBing Zhao uint16_t peer_rx_port = pi; 255101817b10SBing Zhao uint16_t peer_tx_port = pi; 255201817b10SBing Zhao uint32_t manual = 1; 255301817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 255423095155SDariusz Sosnowski uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY; 255523095155SDariusz Sosnowski uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY; 255623095155SDariusz Sosnowski uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY; 255723095155SDariusz Sosnowski uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY; 255823095155SDariusz Sosnowski uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY; 255923095155SDariusz Sosnowski uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY; 256001817b10SBing Zhao 256101817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 256201817b10SBing Zhao peer_rx_port = pi; 256301817b10SBing Zhao peer_tx_port = pi; 256401817b10SBing Zhao manual = 0; 256501817b10SBing Zhao } else if (hairpin_mode & 0x1) { 256601817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 256701817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 256801817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 256901817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 257001817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 257101817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 257201817b10SBing Zhao peer_rx_port = p_pi; 257301817b10SBing Zhao } else { 257401817b10SBing Zhao uint16_t next_pi; 257501817b10SBing Zhao 257601817b10SBing Zhao /* Last port will be the peer RX port of the first. 
*/
257701817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
257801817b10SBing Zhao 				peer_rx_port = next_pi;
257901817b10SBing Zhao 		}
258001817b10SBing Zhao 		manual = 1;
258101817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
258201817b10SBing Zhao 		if (cnt_pi & 0x1) {
258301817b10SBing Zhao 			peer_rx_port = p_pi;
258401817b10SBing Zhao 		} else {
258501817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
258601817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
258701817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
258801817b10SBing Zhao 				peer_rx_port = pi;
258901817b10SBing Zhao 		}
259001817b10SBing Zhao 		peer_tx_port = peer_rx_port;
259101817b10SBing Zhao 		manual = 1;
259201817b10SBing Zhao 	}
25931c69df45SOri Kam 
25941c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
259501817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
25961c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
259701817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
259801817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
259923095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!tx_force_memory;
260023095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
260123095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!tx_rte_memory;
26021c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
26031c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
26041c69df45SOri Kam 		i++;
26051c69df45SOri Kam 		if (diag == 0)
26061c69df45SOri Kam 			continue;
26071c69df45SOri Kam 
26081c69df45SOri Kam 		/* Fail to setup Tx hairpin queue, return */
2609eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2610eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2611eac341d3SJoyce Kong 		else
261261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
261361a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
261461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
261561a3b0e5SAndrew Rybchenko 			pi);
26161c69df45SOri Kam 		/* try to reconfigure queues next time */
26171c69df45SOri Kam 		port->need_reconfig_queues = 1;
26181c69df45SOri Kam 		return -1;
26191c69df45SOri Kam 	}
26201c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
262101817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
26221c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
262301817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
262401817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
262523095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!rx_force_memory;
262623095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
262723095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!rx_rte_memory;
26281c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
26291c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
26301c69df45SOri Kam 		i++;
26311c69df45SOri Kam 		if (diag == 0)
26321c69df45SOri Kam 			continue;
26331c69df45SOri Kam 
26341c69df45SOri Kam 		/* Fail to setup Rx hairpin queue, return */
2635eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2636eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2637eac341d3SJoyce Kong 		else
263861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
263961a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
264061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
264161a3b0e5SAndrew Rybchenko 			pi);
26421c69df45SOri Kam 		/* try to reconfigure queues next time */
26431c69df45SOri Kam 		port->need_reconfig_queues = 1;
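		/*
		 * Note on the retry path: need_reconfig_queues stays set
		 * after this failure, so the next start_port() call reruns
		 * the whole queue-setup sequence.  From the testpmd CLI a
		 * failed "port start <p>" can therefore typically be retried
		 * after "port stop <p>" and fixing the queue configuration,
		 * without restarting the application.
		 */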
26441c69df45SOri Kam return -1; 26451c69df45SOri Kam } 26461c69df45SOri Kam return 0; 26471c69df45SOri Kam } 26481c69df45SOri Kam 26492befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 26502befc67fSViacheslav Ovsiienko int 26512befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 26522befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 26532befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 26542befc67fSViacheslav Ovsiienko { 26552befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 26562befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 26572befc67fSViacheslav Ovsiienko int ret; 26582befc67fSViacheslav Ovsiienko 26592befc67fSViacheslav Ovsiienko if (rx_pkt_nb_segs <= 1 || 26602befc67fSViacheslav Ovsiienko (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { 26612befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26622befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26632befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, 26642befc67fSViacheslav Ovsiienko nb_rx_desc, socket_id, 26652befc67fSViacheslav Ovsiienko rx_conf, mp); 26663c4426dbSDmitry Kozlyuk goto exit; 26672befc67fSViacheslav Ovsiienko } 26682befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 26692befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 26702befc67fSViacheslav Ovsiienko struct rte_mempool *mpx; 26712befc67fSViacheslav Ovsiienko /* 26722befc67fSViacheslav Ovsiienko * Use last valid pool for the segments with number 26732befc67fSViacheslav Ovsiienko * exceeding the pool index. 26742befc67fSViacheslav Ovsiienko */ 26751108c33eSRaja Zidane mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 26762befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 26772befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 26782befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 26792befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 26802befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ? mpx : mp; 268152e2e7edSYuan Wang if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { 268252e2e7edSYuan Wang rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; 268352e2e7edSYuan Wang } else { 268452e2e7edSYuan Wang rx_seg->length = rx_pkt_seg_lengths[i] ? 268552e2e7edSYuan Wang rx_pkt_seg_lengths[i] : 268652e2e7edSYuan Wang mbuf_data_size[mp_n]; 268752e2e7edSYuan Wang } 26882befc67fSViacheslav Ovsiienko } 26892befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 26902befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 26912befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 26922befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 26932befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26942befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26953c4426dbSDmitry Kozlyuk exit: 26963c4426dbSDmitry Kozlyuk ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ? 
26973c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 26983c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 26992befc67fSViacheslav Ovsiienko return ret; 27002befc67fSViacheslav Ovsiienko } 27012befc67fSViacheslav Ovsiienko 270263b72657SIvan Ilchenko static int 270363b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 270463b72657SIvan Ilchenko { 270563b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 270663b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 270763b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 270863b72657SIvan Ilchenko 270963b72657SIvan Ilchenko if (xstats_display_num == 0) 271063b72657SIvan Ilchenko return 0; 271163b72657SIvan Ilchenko 271263b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 271363b72657SIvan Ilchenko if (*ids_supp == NULL) 271463b72657SIvan Ilchenko goto fail_ids_supp; 271563b72657SIvan Ilchenko 271663b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 271763b72657SIvan Ilchenko sizeof(**prev_values)); 271863b72657SIvan Ilchenko if (*prev_values == NULL) 271963b72657SIvan Ilchenko goto fail_prev_values; 272063b72657SIvan Ilchenko 272163b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 272263b72657SIvan Ilchenko sizeof(**curr_values)); 272363b72657SIvan Ilchenko if (*curr_values == NULL) 272463b72657SIvan Ilchenko goto fail_curr_values; 272563b72657SIvan Ilchenko 272663b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 272763b72657SIvan Ilchenko 272863b72657SIvan Ilchenko return 0; 272963b72657SIvan Ilchenko 273063b72657SIvan Ilchenko fail_curr_values: 273163b72657SIvan Ilchenko free(*prev_values); 273263b72657SIvan Ilchenko fail_prev_values: 273363b72657SIvan Ilchenko free(*ids_supp); 273463b72657SIvan Ilchenko fail_ids_supp: 273563b72657SIvan Ilchenko return -ENOMEM; 273663b72657SIvan Ilchenko } 273763b72657SIvan Ilchenko 273863b72657SIvan Ilchenko static void 273963b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 274063b72657SIvan Ilchenko { 274163b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 274263b72657SIvan Ilchenko return; 274363b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 274463b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 274563b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 274663b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 274763b72657SIvan Ilchenko } 274863b72657SIvan Ilchenko 274963b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
*/ 275063b72657SIvan Ilchenko static void 275163b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi) 275263b72657SIvan Ilchenko { 275363b72657SIvan Ilchenko unsigned int stat, stat_supp; 275463b72657SIvan Ilchenko const char *xstat_name; 275563b72657SIvan Ilchenko struct rte_port *port; 275663b72657SIvan Ilchenko uint64_t *ids_supp; 275763b72657SIvan Ilchenko int rc; 275863b72657SIvan Ilchenko 275963b72657SIvan Ilchenko if (xstats_display_num == 0) 276063b72657SIvan Ilchenko return; 276163b72657SIvan Ilchenko 276263b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) { 276363b72657SIvan Ilchenko fill_xstats_display_info(); 276463b72657SIvan Ilchenko return; 276563b72657SIvan Ilchenko } 276663b72657SIvan Ilchenko 276763b72657SIvan Ilchenko port = &ports[pi]; 276863b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED) 276963b72657SIvan Ilchenko return; 277063b72657SIvan Ilchenko 277163b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) 277263b72657SIvan Ilchenko rte_exit(EXIT_FAILURE, 277363b72657SIvan Ilchenko "Failed to allocate xstats display memory\n"); 277463b72657SIvan Ilchenko 277563b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp; 277663b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { 277763b72657SIvan Ilchenko xstat_name = xstats_display[stat].name; 277863b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, 277963b72657SIvan Ilchenko ids_supp + stat_supp); 278063b72657SIvan Ilchenko if (rc != 0) { 278163b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", 278263b72657SIvan Ilchenko xstat_name, pi, stat); 278363b72657SIvan Ilchenko continue; 278463b72657SIvan Ilchenko } 278563b72657SIvan Ilchenko stat_supp++; 278663b72657SIvan Ilchenko } 278763b72657SIvan Ilchenko 278863b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp; 278963b72657SIvan Ilchenko } 279063b72657SIvan Ilchenko 279163b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. 
*/ 279263b72657SIvan Ilchenko static void 279363b72657SIvan Ilchenko fill_xstats_display_info(void) 279463b72657SIvan Ilchenko { 279563b72657SIvan Ilchenko portid_t pi; 279663b72657SIvan Ilchenko 279763b72657SIvan Ilchenko if (xstats_display_num == 0) 279863b72657SIvan Ilchenko return; 279963b72657SIvan Ilchenko 280063b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 280163b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 280263b72657SIvan Ilchenko } 280363b72657SIvan Ilchenko 2804edab33b1STetsuya Mukawa int 2805ce8d5614SIntel start_port(portid_t pid) 2806ce8d5614SIntel { 280792d2703eSMichael Qiu int diag, need_check_link_status = -1; 2808ce8d5614SIntel portid_t pi; 280901817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 281001817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 281101817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 281201817b10SBing Zhao uint16_t cnt_pi = 0; 281301817b10SBing Zhao uint16_t cfg_pi = 0; 281401817b10SBing Zhao int peer_pi; 2815ce8d5614SIntel queueid_t qi; 2816ce8d5614SIntel struct rte_port *port; 28171c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2818ce8d5614SIntel 28194468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 28204468635fSMichael Qiu return 0; 28214468635fSMichael Qiu 28227d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2823edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2824ce8d5614SIntel continue; 2825ce8d5614SIntel 2826d8c079a5SMin Hu (Connor) if (port_is_bonding_slave(pi)) { 2827d8c079a5SMin Hu (Connor) fprintf(stderr, 2828d8c079a5SMin Hu (Connor) "Please remove port %d from bonded device.\n", 2829d8c079a5SMin Hu (Connor) pi); 2830d8c079a5SMin Hu (Connor) continue; 2831d8c079a5SMin Hu (Connor) } 2832d8c079a5SMin Hu (Connor) 283392d2703eSMichael Qiu need_check_link_status = 0; 2834ce8d5614SIntel port = &ports[pi]; 2835eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STOPPED) 2836eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 2837eac341d3SJoyce Kong else { 283861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2839ce8d5614SIntel continue; 2840ce8d5614SIntel } 2841ce8d5614SIntel 2842ce8d5614SIntel if (port->need_reconfig > 0) { 2843655eae01SJie Wang struct rte_eth_conf dev_conf; 2844655eae01SJie Wang int k; 2845655eae01SJie Wang 2846ce8d5614SIntel port->need_reconfig = 0; 2847ce8d5614SIntel 28487ee3e944SVasily Philipov if (flow_isolate_all) { 28497ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 28507ee3e944SVasily Philipov if (ret) { 285161a3b0e5SAndrew Rybchenko fprintf(stderr, 285261a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 285361a3b0e5SAndrew Rybchenko pi); 28547ee3e944SVasily Philipov return -1; 28557ee3e944SVasily Philipov } 28567ee3e944SVasily Philipov } 2857b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 28585706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 285920a0286fSLiu Xiaofeng port->socket_id); 28601c69df45SOri Kam if (nb_hairpinq > 0 && 28611c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 286261a3b0e5SAndrew Rybchenko fprintf(stderr, 286361a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 286461a3b0e5SAndrew Rybchenko pi); 28651c69df45SOri Kam return -1; 28661c69df45SOri Kam } 28671bb4a528SFerruh Yigit 2868ce8d5614SIntel /* configure port */ 2869a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 28701c69df45SOri Kam nb_txq + nb_hairpinq, 2871ce8d5614SIntel &(port->dev_conf)); 2872ce8d5614SIntel if (diag != 0) { 
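			/*
			 * The status handling below follows the port state
			 * machine used throughout this file:
			 * RTE_PORT_STOPPED -> RTE_PORT_HANDLING (transition
			 * in progress) -> RTE_PORT_STARTED.  Every failure
			 * branch rolls HANDLING back to STOPPED so a later
			 * "port start" may retry; any other status found at
			 * this point is reported as an error instead.
			 */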
2873eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2874eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2875eac341d3SJoyce Kong else 287661a3b0e5SAndrew Rybchenko fprintf(stderr, 287761a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 287861a3b0e5SAndrew Rybchenko pi); 287961a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 288061a3b0e5SAndrew Rybchenko pi); 2881ce8d5614SIntel /* try to reconfigure port next time */ 2882ce8d5614SIntel port->need_reconfig = 1; 2883148f963fSBruce Richardson return -1; 2884ce8d5614SIntel } 2885655eae01SJie Wang /* get device configuration*/ 2886655eae01SJie Wang if (0 != 2887655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 2888655eae01SJie Wang fprintf(stderr, 2889655eae01SJie Wang "port %d can not get device configuration\n", 2890655eae01SJie Wang pi); 2891655eae01SJie Wang return -1; 2892655eae01SJie Wang } 2893655eae01SJie Wang /* Apply Rx offloads configuration */ 2894655eae01SJie Wang if (dev_conf.rxmode.offloads != 2895655eae01SJie Wang port->dev_conf.rxmode.offloads) { 2896655eae01SJie Wang port->dev_conf.rxmode.offloads |= 2897655eae01SJie Wang dev_conf.rxmode.offloads; 2898655eae01SJie Wang for (k = 0; 2899655eae01SJie Wang k < port->dev_info.max_rx_queues; 2900655eae01SJie Wang k++) 29013c4426dbSDmitry Kozlyuk port->rxq[k].conf.offloads |= 2902655eae01SJie Wang dev_conf.rxmode.offloads; 2903655eae01SJie Wang } 2904655eae01SJie Wang /* Apply Tx offloads configuration */ 2905655eae01SJie Wang if (dev_conf.txmode.offloads != 2906655eae01SJie Wang port->dev_conf.txmode.offloads) { 2907655eae01SJie Wang port->dev_conf.txmode.offloads |= 2908655eae01SJie Wang dev_conf.txmode.offloads; 2909655eae01SJie Wang for (k = 0; 2910655eae01SJie Wang k < port->dev_info.max_tx_queues; 2911655eae01SJie Wang k++) 29123c4426dbSDmitry Kozlyuk port->txq[k].conf.offloads |= 2913655eae01SJie Wang dev_conf.txmode.offloads; 2914655eae01SJie Wang } 2915ce8d5614SIntel } 2916a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 2917ce8d5614SIntel port->need_reconfig_queues = 0; 2918ce8d5614SIntel /* setup tx queues */ 2919ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 29203c4426dbSDmitry Kozlyuk struct rte_eth_txconf *conf = 29213c4426dbSDmitry Kozlyuk &port->txq[qi].conf; 29223c4426dbSDmitry Kozlyuk 2923b6ea6408SIntel if ((numa_support) && 2924b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2925b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2926d44f8a48SQi Zhang port->nb_tx_desc[qi], 2927d44f8a48SQi Zhang txring_numa[pi], 29283c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2929b6ea6408SIntel else 2930b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2931d44f8a48SQi Zhang port->nb_tx_desc[qi], 2932d44f8a48SQi Zhang port->socket_id, 29333c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2934b6ea6408SIntel 29353c4426dbSDmitry Kozlyuk if (diag == 0) { 29363c4426dbSDmitry Kozlyuk port->txq[qi].state = 29373c4426dbSDmitry Kozlyuk conf->tx_deferred_start ? 
29383c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 29393c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 2940ce8d5614SIntel continue; 29413c4426dbSDmitry Kozlyuk } 2942ce8d5614SIntel 2943ce8d5614SIntel /* Fail to setup tx queue, return */ 2944eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2945eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2946eac341d3SJoyce Kong else 294761a3b0e5SAndrew Rybchenko fprintf(stderr, 294861a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 294961a3b0e5SAndrew Rybchenko pi); 295061a3b0e5SAndrew Rybchenko fprintf(stderr, 295161a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 2952d44f8a48SQi Zhang pi); 2953ce8d5614SIntel /* try to reconfigure queues next time */ 2954ce8d5614SIntel port->need_reconfig_queues = 1; 2955148f963fSBruce Richardson return -1; 2956ce8d5614SIntel } 2957ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2958d44f8a48SQi Zhang /* setup rx queues */ 2959b6ea6408SIntel if ((numa_support) && 2960b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2961b6ea6408SIntel struct rte_mempool * mp = 296226cbb419SViacheslav Ovsiienko mbuf_pool_find 296326cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2964b6ea6408SIntel if (mp == NULL) { 296561a3b0e5SAndrew Rybchenko fprintf(stderr, 296661a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 2967b6ea6408SIntel rxring_numa[pi]); 2968148f963fSBruce Richardson return -1; 2969b6ea6408SIntel } 2970b6ea6408SIntel 29712befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2972d4930794SFerruh Yigit port->nb_rx_desc[qi], 2973d44f8a48SQi Zhang rxring_numa[pi], 29743c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2975d44f8a48SQi Zhang mp); 29761e1d6bddSBernard Iremonger } else { 29771e1d6bddSBernard Iremonger struct rte_mempool *mp = 297826cbb419SViacheslav Ovsiienko mbuf_pool_find 297926cbb419SViacheslav Ovsiienko (port->socket_id, 0); 29801e1d6bddSBernard Iremonger if (mp == NULL) { 298161a3b0e5SAndrew Rybchenko fprintf(stderr, 298261a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 29831e1d6bddSBernard Iremonger port->socket_id); 29841e1d6bddSBernard Iremonger return -1; 2985b6ea6408SIntel } 29862befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2987d4930794SFerruh Yigit port->nb_rx_desc[qi], 2988d44f8a48SQi Zhang port->socket_id, 29893c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2990d44f8a48SQi Zhang mp); 29911e1d6bddSBernard Iremonger } 2992ce8d5614SIntel if (diag == 0) 2993ce8d5614SIntel continue; 2994ce8d5614SIntel 2995ce8d5614SIntel /* Fail to setup rx queue, return */ 2996eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2997eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2998eac341d3SJoyce Kong else 299961a3b0e5SAndrew Rybchenko fprintf(stderr, 300061a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 300161a3b0e5SAndrew Rybchenko pi); 300261a3b0e5SAndrew Rybchenko fprintf(stderr, 300361a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 3004d44f8a48SQi Zhang pi); 3005ce8d5614SIntel /* try to reconfigure queues next time */ 3006ce8d5614SIntel port->need_reconfig_queues = 1; 3007148f963fSBruce Richardson return -1; 3008ce8d5614SIntel } 30091c69df45SOri Kam /* setup hairpin queues */ 301001817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 30111c69df45SOri Kam return -1; 3012ce8d5614SIntel } 3013b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 
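		/*
		 * Illustrative sketch, not upstream code: how the
		 * hairpin_mode bits consumed by setup_hairpin_queues()
		 * above decode (variable names are descriptive only).
		 *
		 *	uint32_t m = hairpin_mode;
		 *	int self = !(m & 0xf);		- each port hairpins to
		 *					  itself, no manual bind
		 *	int loop = !!(m & 0x1);		- started ports form a
		 *					  ring: Rx of port i drains
		 *					  to Tx of the next, the
		 *					  last wraps to the first
		 *	int pair = !!(m & 0x2);		- ports peer two by two
		 *	int tx_explicit = !!(m & 0x10);	- the application, not
		 *					  the PMD, inserts the Tx
		 *					  flow rules
		 *
		 * The HAIRPIN_MODE_*_MEMORY bits (8/9, 12/13, 16/17 defined
		 * earlier) further force or choose the hairpin queue memory
		 * placement per direction.
		 */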
3014b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3015b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3016b0a9354aSPavan Nikhilesh 					NULL, 0);
3017b0a9354aSPavan Nikhilesh 			if (diag < 0)
301861a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3019b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3020b0a9354aSPavan Nikhilesh 					pi);
3021b0a9354aSPavan Nikhilesh 		}
3022b0a9354aSPavan Nikhilesh 
302301817b10SBing Zhao 		p_pi = pi;
302401817b10SBing Zhao 		cnt_pi++;
302501817b10SBing Zhao 
3026ce8d5614SIntel 		/* start port */
3027a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
302852f2c6f2SAndrew Rybchenko 		if (diag < 0) {
302961a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
303061a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3031ce8d5614SIntel 
3032ce8d5614SIntel 			/* Fail to start port, revert status and continue */
3033eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3034eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3035eac341d3SJoyce Kong 			else
303661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
303761a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
303861a3b0e5SAndrew Rybchenko 					pi);
3039ce8d5614SIntel 			continue;
3040ce8d5614SIntel 		}
3041ce8d5614SIntel 
3042eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3043eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3044eac341d3SJoyce Kong 		else
304561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
304661a3b0e5SAndrew Rybchenko 				pi);
3047ce8d5614SIntel 
30485ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3049c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3050a7db3afcSAman Deep Singh 				RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3051d8c89163SZijie Pan 
3052ce8d5614SIntel 		/* at least one port started, need checking link status */
3053ce8d5614SIntel 		need_check_link_status = 1;
305401817b10SBing Zhao 
305501817b10SBing Zhao 		pl[cfg_pi++] = pi;
3056ce8d5614SIntel 	}
3057ce8d5614SIntel 
305892d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
3059edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
306092d2703eSMichael Qiu 	else if (need_check_link_status == 0)
306161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3062ce8d5614SIntel 
306301817b10SBing Zhao 	if (hairpin_mode & 0xf) {
306401817b10SBing Zhao 		uint16_t i;
306501817b10SBing Zhao 		int j;
306601817b10SBing Zhao 
306701817b10SBing Zhao 		/* bind all started hairpin ports */
306801817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
306901817b10SBing Zhao 			pi = pl[i];
307001817b10SBing Zhao 			/* bind current Tx to all peer Rx */
307101817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
307201817b10SBing Zhao 					RTE_MAX_ETHPORTS, 1);
307301817b10SBing Zhao 			if (peer_pi < 0)
307401817b10SBing Zhao 				return peer_pi;
307501817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
307601817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
307701817b10SBing Zhao 					continue;
307801817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
307901817b10SBing Zhao 				if (diag < 0) {
308061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
308161a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
308201817b10SBing Zhao 						pi, peer_pl[j],
308301817b10SBing Zhao 						rte_strerror(-diag));
308401817b10SBing Zhao 					return -1;
308501817b10SBing Zhao 				}
308601817b10SBing Zhao 			}
308701817b10SBing Zhao 			/* bind all peer Tx to current Rx */
308801817b10SBing Zhao 			peer_pi =
rte_eth_hairpin_get_peer_ports(pi, peer_pl, 308901817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 309001817b10SBing Zhao if (peer_pi < 0) 309101817b10SBing Zhao return peer_pi; 309201817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 309301817b10SBing Zhao if (!port_is_started(peer_pl[j])) 309401817b10SBing Zhao continue; 309501817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 309601817b10SBing Zhao if (diag < 0) { 309761a3b0e5SAndrew Rybchenko fprintf(stderr, 309861a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 309901817b10SBing Zhao peer_pl[j], pi, 310001817b10SBing Zhao rte_strerror(-diag)); 310101817b10SBing Zhao return -1; 310201817b10SBing Zhao } 310301817b10SBing Zhao } 310401817b10SBing Zhao } 310501817b10SBing Zhao } 310601817b10SBing Zhao 310763b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 310863b72657SIvan Ilchenko 3109ce8d5614SIntel printf("Done\n"); 3110148f963fSBruce Richardson return 0; 3111ce8d5614SIntel } 3112ce8d5614SIntel 3113ce8d5614SIntel void 3114ce8d5614SIntel stop_port(portid_t pid) 3115ce8d5614SIntel { 3116ce8d5614SIntel portid_t pi; 3117ce8d5614SIntel struct rte_port *port; 3118ce8d5614SIntel int need_check_link_status = 0; 311901817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 312001817b10SBing Zhao int peer_pi; 3121ce8d5614SIntel 31224468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 31234468635fSMichael Qiu return; 31244468635fSMichael Qiu 3125ce8d5614SIntel printf("Stopping ports...\n"); 3126ce8d5614SIntel 31277d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 31284468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3129ce8d5614SIntel continue; 3130ce8d5614SIntel 3131a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 313261a3b0e5SAndrew Rybchenko fprintf(stderr, 313361a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 313461a3b0e5SAndrew Rybchenko pi); 3135a8ef3e3aSBernard Iremonger continue; 3136a8ef3e3aSBernard Iremonger } 3137a8ef3e3aSBernard Iremonger 31380e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 313961a3b0e5SAndrew Rybchenko fprintf(stderr, 314061a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 314161a3b0e5SAndrew Rybchenko pi); 31420e545d30SBernard Iremonger continue; 31430e545d30SBernard Iremonger } 31440e545d30SBernard Iremonger 3145ce8d5614SIntel port = &ports[pi]; 3146eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3147eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3148eac341d3SJoyce Kong else 3149ce8d5614SIntel continue; 3150ce8d5614SIntel 315101817b10SBing Zhao if (hairpin_mode & 0xf) { 315201817b10SBing Zhao int j; 315301817b10SBing Zhao 315401817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 315501817b10SBing Zhao /* unbind all peer Tx from current Rx */ 315601817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 315701817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 315801817b10SBing Zhao if (peer_pi < 0) 315901817b10SBing Zhao continue; 316001817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 316101817b10SBing Zhao if (!port_is_started(peer_pl[j])) 316201817b10SBing Zhao continue; 316301817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 316401817b10SBing Zhao } 316501817b10SBing Zhao } 316601817b10SBing Zhao 31670f93edbfSGregory Etelson if (port->flow_list) 31680f93edbfSGregory Etelson port_flow_flush(pi); 31690f93edbfSGregory Etelson 3170a550baf2SMin Hu (Connor) if (eth_dev_stop_mp(pi) != 0) 3171e62c5a12SIvan Ilchenko 
RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3172e62c5a12SIvan Ilchenko pi); 3173ce8d5614SIntel 3174eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3175eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3176eac341d3SJoyce Kong else 317761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 317861a3b0e5SAndrew Rybchenko pi); 3179ce8d5614SIntel need_check_link_status = 1; 3180ce8d5614SIntel } 3181bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3182edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3183ce8d5614SIntel 3184ce8d5614SIntel printf("Done\n"); 3185ce8d5614SIntel } 3186ce8d5614SIntel 3187ce6959bfSWisam Jaddo static void 31884f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3189ce6959bfSWisam Jaddo { 31904f1de450SThomas Monjalon portid_t i; 31914f1de450SThomas Monjalon portid_t new_total = 0; 3192ce6959bfSWisam Jaddo 31934f1de450SThomas Monjalon for (i = 0; i < *total; i++) 31944f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 31954f1de450SThomas Monjalon array[new_total] = array[i]; 31964f1de450SThomas Monjalon new_total++; 3197ce6959bfSWisam Jaddo } 31984f1de450SThomas Monjalon *total = new_total; 31994f1de450SThomas Monjalon } 32004f1de450SThomas Monjalon 32014f1de450SThomas Monjalon static void 32024f1de450SThomas Monjalon remove_invalid_ports(void) 32034f1de450SThomas Monjalon { 32044f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 32054f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 32064f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3207ce6959bfSWisam Jaddo } 3208ce6959bfSWisam Jaddo 32093889a322SHuisong Li static void 32104b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi) 32114b27989dSDmitry Kozlyuk { 32124b27989dSDmitry Kozlyuk mcast_addr_pool_destroy(pi); 32134b27989dSDmitry Kozlyuk port_flow_flush(pi); 32144b27989dSDmitry Kozlyuk port_flex_item_flush(pi); 32154b27989dSDmitry Kozlyuk port_action_handle_flush(pi); 32164b27989dSDmitry Kozlyuk } 32174b27989dSDmitry Kozlyuk 32184b27989dSDmitry Kozlyuk static void 32193889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves) 32203889a322SHuisong Li { 32213889a322SHuisong Li struct rte_port *port; 32223889a322SHuisong Li portid_t slave_pid; 32233889a322SHuisong Li uint16_t i; 32243889a322SHuisong Li 32253889a322SHuisong Li for (i = 0; i < num_slaves; i++) { 32263889a322SHuisong Li slave_pid = slave_pids[i]; 32273889a322SHuisong Li if (port_is_started(slave_pid) == 1) { 32283889a322SHuisong Li if (rte_eth_dev_stop(slave_pid) != 0) 32293889a322SHuisong Li fprintf(stderr, "rte_eth_dev_stop failed for port %u\n", 32303889a322SHuisong Li slave_pid); 32313889a322SHuisong Li 32323889a322SHuisong Li port = &ports[slave_pid]; 32333889a322SHuisong Li port->port_status = RTE_PORT_STOPPED; 32343889a322SHuisong Li } 32353889a322SHuisong Li 32363889a322SHuisong Li clear_port_slave_flag(slave_pid); 32373889a322SHuisong Li 32383889a322SHuisong Li /* Close slave device when testpmd quit or is killed. 
*/ 32393889a322SHuisong Li if (cl_quit == 1 || f_quit == 1) 32403889a322SHuisong Li rte_eth_dev_close(slave_pid); 32413889a322SHuisong Li } 32423889a322SHuisong Li } 32433889a322SHuisong Li 3244ce8d5614SIntel void 3245ce8d5614SIntel close_port(portid_t pid) 3246ce8d5614SIntel { 3247ce8d5614SIntel portid_t pi; 3248ce8d5614SIntel struct rte_port *port; 32493889a322SHuisong Li portid_t slave_pids[RTE_MAX_ETHPORTS]; 32503889a322SHuisong Li int num_slaves = 0; 3251ce8d5614SIntel 32524468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 32534468635fSMichael Qiu return; 32544468635fSMichael Qiu 3255ce8d5614SIntel printf("Closing ports...\n"); 3256ce8d5614SIntel 32577d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 32584468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3259ce8d5614SIntel continue; 3260ce8d5614SIntel 3261a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 326261a3b0e5SAndrew Rybchenko fprintf(stderr, 326361a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 326461a3b0e5SAndrew Rybchenko pi); 3265a8ef3e3aSBernard Iremonger continue; 3266a8ef3e3aSBernard Iremonger } 3267a8ef3e3aSBernard Iremonger 32680e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 326961a3b0e5SAndrew Rybchenko fprintf(stderr, 327061a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 327161a3b0e5SAndrew Rybchenko pi); 32720e545d30SBernard Iremonger continue; 32730e545d30SBernard Iremonger } 32740e545d30SBernard Iremonger 3275ce8d5614SIntel port = &ports[pi]; 3276eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 327761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3278d4e8ad64SMichael Qiu continue; 3279d4e8ad64SMichael Qiu } 3280d4e8ad64SMichael Qiu 3281a550baf2SMin Hu (Connor) if (is_proc_primary()) { 32824b27989dSDmitry Kozlyuk flush_port_owned_resources(pi); 32833889a322SHuisong Li #ifdef RTE_NET_BOND 32843889a322SHuisong Li if (port->bond_flag == 1) 32853889a322SHuisong Li num_slaves = rte_eth_bond_slaves_get(pi, 32863889a322SHuisong Li slave_pids, RTE_MAX_ETHPORTS); 32873889a322SHuisong Li #endif 3288ce8d5614SIntel rte_eth_dev_close(pi); 32893889a322SHuisong Li /* 32903889a322SHuisong Li * If this port is bonded device, all slaves under the 32913889a322SHuisong Li * device need to be removed or closed. 
32923889a322SHuisong Li */ 32933889a322SHuisong Li if (port->bond_flag == 1 && num_slaves > 0) 32943889a322SHuisong Li clear_bonding_slave_device(slave_pids, 32953889a322SHuisong Li num_slaves); 3296ce8d5614SIntel } 329763b72657SIvan Ilchenko 329863b72657SIvan Ilchenko free_xstats_display_info(pi); 3299a550baf2SMin Hu (Connor) } 3300ce8d5614SIntel 330185c6571cSThomas Monjalon remove_invalid_ports(); 3302ce8d5614SIntel printf("Done\n"); 3303ce8d5614SIntel } 3304ce8d5614SIntel 3305edab33b1STetsuya Mukawa void 330697f1e196SWei Dai reset_port(portid_t pid) 330797f1e196SWei Dai { 330897f1e196SWei Dai int diag; 330997f1e196SWei Dai portid_t pi; 331097f1e196SWei Dai struct rte_port *port; 331197f1e196SWei Dai 331297f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 331397f1e196SWei Dai return; 331497f1e196SWei Dai 33151cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 33161cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 331761a3b0e5SAndrew Rybchenko fprintf(stderr, 331861a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 33191cde1b9aSShougang Wang return; 33201cde1b9aSShougang Wang } 33211cde1b9aSShougang Wang 332297f1e196SWei Dai printf("Resetting ports...\n"); 332397f1e196SWei Dai 332497f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 332597f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 332697f1e196SWei Dai continue; 332797f1e196SWei Dai 332897f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 332961a3b0e5SAndrew Rybchenko fprintf(stderr, 333061a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 333161a3b0e5SAndrew Rybchenko pi); 333297f1e196SWei Dai continue; 333397f1e196SWei Dai } 333497f1e196SWei Dai 333597f1e196SWei Dai if (port_is_bonding_slave(pi)) { 333661a3b0e5SAndrew Rybchenko fprintf(stderr, 333761a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 333897f1e196SWei Dai pi); 333997f1e196SWei Dai continue; 334097f1e196SWei Dai } 334197f1e196SWei Dai 3342e9351eaaSQiming Yang if (is_proc_primary()) { 334397f1e196SWei Dai diag = rte_eth_dev_reset(pi); 334497f1e196SWei Dai if (diag == 0) { 334597f1e196SWei Dai port = &ports[pi]; 334697f1e196SWei Dai port->need_reconfig = 1; 334797f1e196SWei Dai port->need_reconfig_queues = 1; 334897f1e196SWei Dai } else { 334961a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. 
diag=%d\n", 335061a3b0e5SAndrew Rybchenko pi, diag); 335197f1e196SWei Dai } 335297f1e196SWei Dai } 3353e9351eaaSQiming Yang } 335497f1e196SWei Dai 335597f1e196SWei Dai printf("Done\n"); 335697f1e196SWei Dai } 335797f1e196SWei Dai 335897f1e196SWei Dai void 3359edab33b1STetsuya Mukawa attach_port(char *identifier) 3360ce8d5614SIntel { 33614f1ed78eSThomas Monjalon portid_t pi; 3362c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3363ce8d5614SIntel 3364edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3365edab33b1STetsuya Mukawa 3366edab33b1STetsuya Mukawa if (identifier == NULL) { 336761a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3368edab33b1STetsuya Mukawa return; 3369ce8d5614SIntel } 3370ce8d5614SIntel 337175b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3372c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3373edab33b1STetsuya Mukawa return; 3374c9cce428SThomas Monjalon } 3375c9cce428SThomas Monjalon 33764f1ed78eSThomas Monjalon /* first attach mode: event */ 33774f1ed78eSThomas Monjalon if (setup_on_probe_event) { 33784f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 33794f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 33804f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 33814f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 33824f1ed78eSThomas Monjalon setup_attached_port(pi); 33834f1ed78eSThomas Monjalon return; 33844f1ed78eSThomas Monjalon } 33854f1ed78eSThomas Monjalon 33864f1ed78eSThomas Monjalon /* second attach mode: iterator */ 338786fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 33884f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 338986fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 339086fa5de1SThomas Monjalon continue; /* port was already attached before */ 3391c9cce428SThomas Monjalon setup_attached_port(pi); 3392c9cce428SThomas Monjalon } 339386fa5de1SThomas Monjalon } 3394c9cce428SThomas Monjalon 3395c9cce428SThomas Monjalon static void 3396c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3397c9cce428SThomas Monjalon { 3398c9cce428SThomas Monjalon unsigned int socket_id; 339934fc1051SIvan Ilchenko int ret; 3400edab33b1STetsuya Mukawa 3401931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 340229841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 3403931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 340429841336SPhil Yang socket_id = socket_ids[0]; 3405931126baSBernard Iremonger reconfig(pi, socket_id); 340634fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 340734fc1051SIvan Ilchenko if (ret != 0) 340861a3b0e5SAndrew Rybchenko fprintf(stderr, 340961a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 341034fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3411edab33b1STetsuya Mukawa 34124f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 34134f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 34144f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 34154f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3416edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3417edab33b1STetsuya Mukawa 3418edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 3419edab33b1STetsuya Mukawa printf("Done\n"); 3420edab33b1STetsuya Mukawa } 3421edab33b1STetsuya Mukawa 34220654d4a8SThomas Monjalon static void 34230654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 34245f4ec54fSChen Jing D(Mark) { 3425f8e5baa2SThomas Monjalon portid_t sibling; 3426f8e5baa2SThomas Monjalon 3427f8e5baa2SThomas Monjalon if (dev == NULL) { 342861a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3429f8e5baa2SThomas Monjalon return; 3430f8e5baa2SThomas Monjalon } 3431f8e5baa2SThomas Monjalon 34320654d4a8SThomas Monjalon printf("Removing a device...\n"); 3433938a184aSAdrien Mazarguil 34342a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 34352a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 34362a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 343761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 343861a3b0e5SAndrew Rybchenko sibling); 34392a449871SThomas Monjalon return; 34402a449871SThomas Monjalon } 34414b27989dSDmitry Kozlyuk flush_port_owned_resources(sibling); 34422a449871SThomas Monjalon } 34432a449871SThomas Monjalon } 34442a449871SThomas Monjalon 344575b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3446ec5ecd7eSDavid Marchand TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev)); 3447edab33b1STetsuya Mukawa return; 34483070419eSGaetan Rivet } 34494f1de450SThomas Monjalon remove_invalid_ports(); 345003ce2c53SMatan Azrad 34510654d4a8SThomas Monjalon printf("Device is detached\n"); 3452f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3453edab33b1STetsuya Mukawa printf("Done\n"); 3454edab33b1STetsuya Mukawa return; 34555f4ec54fSChen Jing D(Mark) } 34565f4ec54fSChen Jing D(Mark) 3457af75078fSIntel void 34580654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 34590654d4a8SThomas Monjalon { 34600a0821bcSPaulis Gributs int ret; 34610a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 34620a0821bcSPaulis Gributs 34630654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 34640654d4a8SThomas Monjalon return; 34650654d4a8SThomas Monjalon 34660654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 34670654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 346861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 34690654d4a8SThomas Monjalon return; 34700654d4a8SThomas Monjalon } 347161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 34720654d4a8SThomas Monjalon } 34730654d4a8SThomas Monjalon 34740a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 34750a0821bcSPaulis Gributs if (ret != 0) { 34760a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 34770a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 34780a0821bcSPaulis Gributs port_id); 34790a0821bcSPaulis Gributs return; 34800a0821bcSPaulis Gributs } 34810a0821bcSPaulis Gributs detach_device(dev_info.device); 34820654d4a8SThomas Monjalon } 34830654d4a8SThomas Monjalon 34840654d4a8SThomas Monjalon void 34855edee5f6SThomas Monjalon detach_devargs(char *identifier) 348655e51c96SNithin Dabilpuram { 348755e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 348855e51c96SNithin Dabilpuram struct rte_devargs da; 348955e51c96SNithin Dabilpuram portid_t port_id; 349055e51c96SNithin Dabilpuram 349155e51c96SNithin Dabilpuram printf("Removing a device...\n"); 349255e51c96SNithin Dabilpuram 
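/*
 * The identifier takes the same forms accepted by "port attach": a PCI
 * address or a virtual device devargs string.  Hypothetical examples,
 * not taken from this file: "0000:02:00.0" or "net_ring0".
 */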
349355e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 349455e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 349561a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 349655e51c96SNithin Dabilpuram return; 349755e51c96SNithin Dabilpuram } 349855e51c96SNithin Dabilpuram 349955e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 350055e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 350155e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 350261a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 350361a3b0e5SAndrew Rybchenko port_id); 3504149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 350564051bb1SXueming Li rte_devargs_reset(&da); 350655e51c96SNithin Dabilpuram return; 350755e51c96SNithin Dabilpuram } 35084b27989dSDmitry Kozlyuk flush_port_owned_resources(port_id); 350955e51c96SNithin Dabilpuram } 351055e51c96SNithin Dabilpuram } 351155e51c96SNithin Dabilpuram 3512148c51a3SDavid Marchand if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) { 351355e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 3514148c51a3SDavid Marchand da.name, rte_bus_name(da.bus)); 351564051bb1SXueming Li rte_devargs_reset(&da); 351655e51c96SNithin Dabilpuram return; 351755e51c96SNithin Dabilpuram } 351855e51c96SNithin Dabilpuram 351955e51c96SNithin Dabilpuram remove_invalid_ports(); 352055e51c96SNithin Dabilpuram 352155e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 352255e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 352355e51c96SNithin Dabilpuram printf("Done\n"); 352464051bb1SXueming Li rte_devargs_reset(&da); 352555e51c96SNithin Dabilpuram } 352655e51c96SNithin Dabilpuram 352755e51c96SNithin Dabilpuram void 3528af75078fSIntel pmd_test_exit(void) 3529af75078fSIntel { 3530af75078fSIntel portid_t pt_id; 353126cbb419SViacheslav Ovsiienko unsigned int i; 3532fb73e096SJeff Guo int ret; 3533af75078fSIntel 35348210ec25SPablo de Lara if (test_done == 0) 35358210ec25SPablo de Lara stop_packet_forwarding(); 35368210ec25SPablo de Lara 3537761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 353826cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 35393a0968c8SShahaf Shuler if (mempools[i]) { 35403a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 35413a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 35423a0968c8SShahaf Shuler NULL); 35433a0968c8SShahaf Shuler } 35443a0968c8SShahaf Shuler } 3545761f7ae1SJie Zhou #endif 3546d3a274ceSZhihong Wang if (ports != NULL) { 3547d3a274ceSZhihong Wang no_link_check = 1; 35487d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 354908fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3550af75078fSIntel fflush(stdout); 3551d3a274ceSZhihong Wang stop_port(pt_id); 355208fd782bSCristian Dumitrescu } 355308fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 355408fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 355508fd782bSCristian Dumitrescu fflush(stdout); 3556d3a274ceSZhihong Wang close_port(pt_id); 3557af75078fSIntel } 3558d3a274ceSZhihong Wang } 3559fb73e096SJeff Guo 3560fb73e096SJeff Guo if (hot_plug) { 3561fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 35622049c511SJeff Guo if (ret) { 3563fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3564fb73e096SJeff Guo "fail to stop device event monitor."); 35652049c511SJeff Guo return; 35662049c511SJeff Guo } 
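		/*
		 * Teardown mirrors the hot-plug setup presumably done at
		 * startup: stop the event monitor first so no further events
		 * are delivered, then unregister the callback, and finally
		 * disable hotplug handling.
		 */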
3567fb73e096SJeff Guo 35682049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3569cc1bf307SJeff Guo dev_event_callback, NULL); 35702049c511SJeff Guo if (ret < 0) { 3571fb73e096SJeff Guo RTE_LOG(ERR, EAL, 35722049c511SJeff Guo "fail to unregister device event callback.\n"); 35732049c511SJeff Guo return; 35742049c511SJeff Guo } 35752049c511SJeff Guo 35762049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 35772049c511SJeff Guo if (ret) { 35782049c511SJeff Guo RTE_LOG(ERR, EAL, 35792049c511SJeff Guo "fail to disable hotplug handling.\n"); 35802049c511SJeff Guo return; 35812049c511SJeff Guo } 3582fb73e096SJeff Guo } 358326cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3584401b744dSShahaf Shuler if (mempools[i]) 3585a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3586401b744dSShahaf Shuler } 358763b72657SIvan Ilchenko free(xstats_display); 3588fb73e096SJeff Guo 3589d3a274ceSZhihong Wang printf("\nBye...\n"); 3590af75078fSIntel } 3591af75078fSIntel 3592af75078fSIntel typedef void (*cmd_func_t)(void); 3593af75078fSIntel struct pmd_test_command { 3594af75078fSIntel const char *cmd_name; 3595af75078fSIntel cmd_func_t cmd_func; 3596af75078fSIntel }; 3597af75078fSIntel 3598ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3599af75078fSIntel static void 3600edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3601af75078fSIntel { 3602ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3603ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3604f8244c63SZhiyong Yang portid_t portid; 3605f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3606ce8d5614SIntel struct rte_eth_link link; 3607e661a08bSIgor Romanov int ret; 3608ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3609ce8d5614SIntel 3610ce8d5614SIntel printf("Checking link statuses...\n"); 3611ce8d5614SIntel fflush(stdout); 3612ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3613ce8d5614SIntel all_ports_up = 1; 36147d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3615ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3616ce8d5614SIntel continue; 3617ce8d5614SIntel memset(&link, 0, sizeof(link)); 3618e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3619e661a08bSIgor Romanov if (ret < 0) { 3620e661a08bSIgor Romanov all_ports_up = 0; 3621e661a08bSIgor Romanov if (print_flag == 1) 362261a3b0e5SAndrew Rybchenko fprintf(stderr, 362361a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3624e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3625e661a08bSIgor Romanov continue; 3626e661a08bSIgor Romanov } 3627ce8d5614SIntel /* print link status if flag set */ 3628ce8d5614SIntel if (print_flag == 1) { 3629ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3630ba5509a6SIvan Dyukov sizeof(link_status), &link); 3631ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3632ce8d5614SIntel continue; 3633ce8d5614SIntel } 3634ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3635295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3636ce8d5614SIntel all_ports_up = 0; 3637ce8d5614SIntel break; 3638ce8d5614SIntel } 3639ce8d5614SIntel } 3640ce8d5614SIntel /* after finally printing all link status, get out */ 3641ce8d5614SIntel if (print_flag == 1) 3642ce8d5614SIntel break; 3643ce8d5614SIntel 3644ce8d5614SIntel if (all_ports_up == 0) { 3645ce8d5614SIntel fflush(stdout); 3646ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 
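			/*
			 * Each failed pass sleeps CHECK_INTERVAL (100 ms);
			 * with MAX_CHECK_TIME at 90 the ports get roughly
			 * 9 seconds to come up before the final statuses are
			 * printed regardless, matching the comment above.
			 */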
3647ce8d5614SIntel } 3648ce8d5614SIntel 3649ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3650ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3651ce8d5614SIntel print_flag = 1; 3652ce8d5614SIntel } 36538ea656f8SGaetan Rivet 36548ea656f8SGaetan Rivet if (lsc_interrupt) 36558ea656f8SGaetan Rivet break; 3656ce8d5614SIntel } 3657af75078fSIntel } 3658af75078fSIntel 3659284c908cSGaetan Rivet static void 3660cc1bf307SJeff Guo rmv_port_callback(void *arg) 3661284c908cSGaetan Rivet { 36623b97888aSMatan Azrad int need_to_start = 0; 36630da2a62bSMatan Azrad int org_no_link_check = no_link_check; 366428caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 36650a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 36660a0821bcSPaulis Gributs int ret; 3667284c908cSGaetan Rivet 3668284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3669284c908cSGaetan Rivet 36703b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 36713b97888aSMatan Azrad need_to_start = 1; 36723b97888aSMatan Azrad stop_packet_forwarding(); 36733b97888aSMatan Azrad } 36740da2a62bSMatan Azrad no_link_check = 1; 3675284c908cSGaetan Rivet stop_port(port_id); 36760da2a62bSMatan Azrad no_link_check = org_no_link_check; 36770654d4a8SThomas Monjalon 36780a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 36790a0821bcSPaulis Gributs if (ret != 0) 36800a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 36810a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 36820a0821bcSPaulis Gributs port_id); 3683e1d38504SPaulis Gributs else { 3684e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3685e1d38504SPaulis Gributs close_port(port_id); 3686e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3687e1d38504SPaulis Gributs } 36883b97888aSMatan Azrad if (need_to_start) 36893b97888aSMatan Azrad start_packet_forwarding(0); 3690284c908cSGaetan Rivet } 3691284c908cSGaetan Rivet 369276ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3693d6af1a13SBernard Iremonger static int 3694f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3695d6af1a13SBernard Iremonger void *ret_param) 369676ad4a2dSGaetan Rivet { 369776ad4a2dSGaetan Rivet RTE_SET_USED(param); 3698d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 369976ad4a2dSGaetan Rivet 370076ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 370161a3b0e5SAndrew Rybchenko fprintf(stderr, 370261a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 370376ad4a2dSGaetan Rivet port_id, __func__, type); 370476ad4a2dSGaetan Rivet fflush(stderr); 37053af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3706f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 370797b5d8b5SThomas Monjalon eth_event_desc[type]); 370876ad4a2dSGaetan Rivet fflush(stdout); 370976ad4a2dSGaetan Rivet } 3710284c908cSGaetan Rivet 3711284c908cSGaetan Rivet switch (type) { 37124f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 37134f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 37144f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 37154f1ed78eSThomas Monjalon break; 3716284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 37174f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 37184f1ed78eSThomas Monjalon break; 3719284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 
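				/* Defer the detach by 100 ms: removing the
				 * port from inside the event callback is not
				 * safe (see the rationale in
				 * dev_event_callback() below). */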
3720cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 372161a3b0e5SAndrew Rybchenko fprintf(stderr, 372261a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3723284c908cSGaetan Rivet break; 372485c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 372585c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 372685c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 372785c6571cSThomas Monjalon break; 3728bc70e559SSpike Du case RTE_ETH_EVENT_RX_AVAIL_THRESH: { 3729bc70e559SSpike Du uint16_t rxq_id; 3730bc70e559SSpike Du int ret; 3731bc70e559SSpike Du 3732bc70e559SSpike Du /* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */ 3733bc70e559SSpike Du for (rxq_id = 0; ; rxq_id++) { 3734bc70e559SSpike Du ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id, 3735bc70e559SSpike Du NULL); 3736bc70e559SSpike Du if (ret <= 0) 3737bc70e559SSpike Du break; 3738bc70e559SSpike Du printf("Received avail_thresh event, port: %u, rxq_id: %u\n", 3739bc70e559SSpike Du port_id, rxq_id); 3740f41a5092SSpike Du 3741f41a5092SSpike Du #ifdef RTE_NET_MLX5 3742f41a5092SSpike Du mlx5_test_avail_thresh_event_handler(port_id, rxq_id); 3743f41a5092SSpike Du #endif 3744bc70e559SSpike Du } 3745bc70e559SSpike Du break; 3746bc70e559SSpike Du } 3747284c908cSGaetan Rivet default: 3748284c908cSGaetan Rivet break; 3749284c908cSGaetan Rivet } 3750d6af1a13SBernard Iremonger return 0; 375176ad4a2dSGaetan Rivet } 375276ad4a2dSGaetan Rivet 375397b5d8b5SThomas Monjalon static int 375497b5d8b5SThomas Monjalon register_eth_event_callback(void) 375597b5d8b5SThomas Monjalon { 375697b5d8b5SThomas Monjalon int ret; 375797b5d8b5SThomas Monjalon enum rte_eth_event_type event; 375897b5d8b5SThomas Monjalon 375997b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 376097b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 376197b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 376297b5d8b5SThomas Monjalon event, 376397b5d8b5SThomas Monjalon eth_event_callback, 376497b5d8b5SThomas Monjalon NULL); 376597b5d8b5SThomas Monjalon if (ret != 0) { 376697b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 376797b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 376897b5d8b5SThomas Monjalon return -1; 376997b5d8b5SThomas Monjalon } 377097b5d8b5SThomas Monjalon } 377197b5d8b5SThomas Monjalon 377297b5d8b5SThomas Monjalon return 0; 377397b5d8b5SThomas Monjalon } 377497b5d8b5SThomas Monjalon 3775fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3776fb73e096SJeff Guo static void 3777cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3778fb73e096SJeff Guo __rte_unused void *arg) 3779fb73e096SJeff Guo { 37802049c511SJeff Guo uint16_t port_id; 37812049c511SJeff Guo int ret; 37822049c511SJeff Guo 3783fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3784fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3785fb73e096SJeff Guo __func__, type); 3786fb73e096SJeff Guo fflush(stderr); 3787fb73e096SJeff Guo } 3788fb73e096SJeff Guo 3789fb73e096SJeff Guo switch (type) { 3790fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3791cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3792fb73e096SJeff Guo device_name); 37932049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 37942049c511SJeff Guo if (ret) { 37952049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 37962049c511SJeff Guo 
			device_name);
37972049c511SJeff Guo 			return;
37982049c511SJeff Guo 		}
3799cc1bf307SJeff Guo 		/*
3800cc1bf307SJeff Guo 		 * Because the user's callback is invoked from the EAL
3801cc1bf307SJeff Guo 		 * interrupt callback, the interrupt callback must return
3802cc1bf307SJeff Guo 		 * before it can be unregistered when detaching the device.
3803cc1bf307SJeff Guo 		 * So finish the callback quickly and detach the device
3804cc1bf307SJeff Guo 		 * through a deferred removal instead. This is a workaround:
3805cc1bf307SJeff Guo 		 * once device detaching is moved into the EAL, the deferred
3806cc1bf307SJeff Guo 		 * removal can be deleted.
3807cc1bf307SJeff Guo 		 */
3808cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3809cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3810cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3811cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3812fb73e096SJeff Guo 		break;
3813fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3814fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3815fb73e096SJeff Guo 			device_name);
3816fb73e096SJeff Guo 		/* TODO: once kernel driver binding is finished,
3817fb73e096SJeff Guo 		 * begin to attach the port.
3818fb73e096SJeff Guo 		 */
3819fb73e096SJeff Guo 		break;
3820fb73e096SJeff Guo 	default:
3821fb73e096SJeff Guo 		break;
3822fb73e096SJeff Guo 	}
3823fb73e096SJeff Guo }
3824fb73e096SJeff Guo 
3825f2c5125aSPablo de Lara static void
3826f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3827f2c5125aSPablo de Lara {
3828d44f8a48SQi Zhang 	uint16_t qid;
38295e91aeefSWei Zhao 	uint64_t offloads;
3830f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3831f2c5125aSPablo de Lara 
3832d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
38333c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
38343c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
3835f4d178c1SXueming Li 
3836f4d178c1SXueming Li 		if (rxq_share > 0 &&
3837f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3838f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
38393c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
38403c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping.
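			 * Queues with the same index across the ports of one
			 * share group land on the same shared Rx queue.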
*/ 3841f4d178c1SXueming Li } 3842f4d178c1SXueming Li 3843575e0fd1SWei Zhao if (offloads != 0) 38443c4426dbSDmitry Kozlyuk port->rxq[qid].conf.offloads = offloads; 3845d44f8a48SQi Zhang 3846d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 3847f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 38483c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh; 3849f2c5125aSPablo de Lara 3850f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 38513c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh; 3852f2c5125aSPablo de Lara 3853f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 38543c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh; 3855f2c5125aSPablo de Lara 3856f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 38573c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_free_thresh = rx_free_thresh; 3858f2c5125aSPablo de Lara 3859f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 38603c4426dbSDmitry Kozlyuk port->rxq[qid].conf.rx_drop_en = rx_drop_en; 3861f2c5125aSPablo de Lara 3862d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 3863d44f8a48SQi Zhang } 3864d44f8a48SQi Zhang 3865d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 38663c4426dbSDmitry Kozlyuk offloads = port->txq[qid].conf.offloads; 38673c4426dbSDmitry Kozlyuk port->txq[qid].conf = port->dev_info.default_txconf; 3868575e0fd1SWei Zhao if (offloads != 0) 38693c4426dbSDmitry Kozlyuk port->txq[qid].conf.offloads = offloads; 3870d44f8a48SQi Zhang 3871d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 3872f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 38733c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh; 3874f2c5125aSPablo de Lara 3875f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 38763c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh; 3877f2c5125aSPablo de Lara 3878f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 38793c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh; 3880f2c5125aSPablo de Lara 3881f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 38823c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh; 3883f2c5125aSPablo de Lara 3884f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 38853c4426dbSDmitry Kozlyuk port->txq[qid].conf.tx_free_thresh = tx_free_thresh; 3886d44f8a48SQi Zhang 3887d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 3888d44f8a48SQi Zhang } 3889f2c5125aSPablo de Lara } 3890f2c5125aSPablo de Lara 38910c4abd36SSteve Yang /* 3892b563c142SFerruh Yigit * Helper function to set MTU from frame size 38930c4abd36SSteve Yang * 38940c4abd36SSteve Yang * port->dev_info should be set before calling this function. 
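 * Typical L2 overhead (Ethernet header plus CRC) is 18 bytes, so a
 * max_rx_pktlen of 1518 normally maps to an MTU of 1500; the exact
 * overhead is queried per port via get_eth_overhead().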
38950c4abd36SSteve Yang * 38960c4abd36SSteve Yang * return 0 on success, negative on error 38970c4abd36SSteve Yang */ 38980c4abd36SSteve Yang int 3899b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen) 39000c4abd36SSteve Yang { 39010c4abd36SSteve Yang struct rte_port *port = &ports[portid]; 39020c4abd36SSteve Yang uint32_t eth_overhead; 39031bb4a528SFerruh Yigit uint16_t mtu, new_mtu; 39040c4abd36SSteve Yang 39051bb4a528SFerruh Yigit eth_overhead = get_eth_overhead(&port->dev_info); 39061bb4a528SFerruh Yigit 39071bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(portid, &mtu) != 0) { 39081bb4a528SFerruh Yigit printf("Failed to get MTU for port %u\n", portid); 39091bb4a528SFerruh Yigit return -1; 39101bb4a528SFerruh Yigit } 39111bb4a528SFerruh Yigit 39121bb4a528SFerruh Yigit new_mtu = max_rx_pktlen - eth_overhead; 39130c4abd36SSteve Yang 39141bb4a528SFerruh Yigit if (mtu == new_mtu) 39151bb4a528SFerruh Yigit return 0; 39161bb4a528SFerruh Yigit 39171bb4a528SFerruh Yigit if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) { 391861a3b0e5SAndrew Rybchenko fprintf(stderr, 391961a3b0e5SAndrew Rybchenko "Failed to set MTU to %u for port %u\n", 39201bb4a528SFerruh Yigit new_mtu, portid); 39211bb4a528SFerruh Yigit return -1; 39220c4abd36SSteve Yang } 39230c4abd36SSteve Yang 39241bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = new_mtu; 39251bb4a528SFerruh Yigit 39260c4abd36SSteve Yang return 0; 39270c4abd36SSteve Yang } 39280c4abd36SSteve Yang 3929013af9b6SIntel void 3930013af9b6SIntel init_port_config(void) 3931013af9b6SIntel { 3932013af9b6SIntel portid_t pid; 3933013af9b6SIntel struct rte_port *port; 3934655eae01SJie Wang int ret, i; 3935013af9b6SIntel 39367d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 3937013af9b6SIntel port = &ports[pid]; 39386f51deb9SIvan Ilchenko 39396f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 39406f51deb9SIvan Ilchenko if (ret != 0) 39416f51deb9SIvan Ilchenko return; 39426f51deb9SIvan Ilchenko 39433ce690d3SBruce Richardson if (nb_rxq > 1) { 3944013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 394590892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 3946422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 3947af75078fSIntel } else { 3948013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 3949013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 3950af75078fSIntel } 39513ce690d3SBruce Richardson 39525f592039SJingjing Wu if (port->dcb_flag == 0) { 3953655eae01SJie Wang if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) { 3954f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 3955f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3956295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_RSS); 3957655eae01SJie Wang } else { 3958295968d1SFerruh Yigit port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; 3959655eae01SJie Wang port->dev_conf.rxmode.offloads &= 3960295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 3961655eae01SJie Wang 3962655eae01SJie Wang for (i = 0; 3963655eae01SJie Wang i < port->dev_info.nb_rx_queues; 3964655eae01SJie Wang i++) 39653c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads &= 3966295968d1SFerruh Yigit ~RTE_ETH_RX_OFFLOAD_RSS_HASH; 3967655eae01SJie Wang } 39683ce690d3SBruce Richardson } 39693ce690d3SBruce Richardson 3970f4d178c1SXueming Li rxtx_port_config(pid); 3971013af9b6SIntel 3972a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 3973a5279d25SIgor Romanov if (ret != 0) 3974a5279d25SIgor Romanov 
			return;
3975013af9b6SIntel 
39760a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
39778ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
39780a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3979284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3980013af9b6SIntel 	}
3981013af9b6SIntel }
3982013af9b6SIntel 
398341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
398441b05095SBernard Iremonger {
398541b05095SBernard Iremonger 	struct rte_port *port;
398641b05095SBernard Iremonger 
398741b05095SBernard Iremonger 	port = &ports[slave_pid];
398841b05095SBernard Iremonger 	port->slave_flag = 1;
398941b05095SBernard Iremonger }
399041b05095SBernard Iremonger 
399141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
399241b05095SBernard Iremonger {
399341b05095SBernard Iremonger 	struct rte_port *port;
399441b05095SBernard Iremonger 
399541b05095SBernard Iremonger 	port = &ports[slave_pid];
399641b05095SBernard Iremonger 	port->slave_flag = 0;
399741b05095SBernard Iremonger }
399841b05095SBernard Iremonger 
39990e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
40000e545d30SBernard Iremonger {
40010e545d30SBernard Iremonger 	struct rte_port *port;
40020a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
40030a0821bcSPaulis Gributs 	int ret;
40040e545d30SBernard Iremonger 
40050e545d30SBernard Iremonger 	port = &ports[slave_pid];
40060a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
40070a0821bcSPaulis Gributs 	if (ret != 0) {
40080a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
40090a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
40100a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave\n",
40110a0821bcSPaulis Gributs 			slave_pid);
40120a0821bcSPaulis Gributs 		return 0;
40130a0821bcSPaulis Gributs 	}
40140a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4015b8b8b344SMatan Azrad 		return 1;
4016b8b8b344SMatan Azrad 	return 0;
40170e545d30SBernard Iremonger }
40180e545d30SBernard Iremonger 
4019013af9b6SIntel const uint16_t vlan_tags[] = {
4020013af9b6SIntel 	0, 1, 2, 3, 4, 5, 6, 7,
4021013af9b6SIntel 	8, 9, 10, 11, 12, 13, 14, 15,
4022013af9b6SIntel 	16, 17, 18, 19, 20, 21, 22, 23,
4023013af9b6SIntel 	24, 25, 26, 27, 28, 29, 30, 31
4024013af9b6SIntel };
4025013af9b6SIntel 
4026013af9b6SIntel static int
4027ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
40281a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
40291a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
40301a572499SJingjing Wu 		 uint8_t pfc_en)
4031013af9b6SIntel {
4032013af9b6SIntel 	uint8_t i;
4033ac7c491cSKonstantin Ananyev 	int32_t rc;
4034ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
4035af75078fSIntel 
4036af75078fSIntel 	/*
4037013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags
4038013af9b6SIntel 	 * array given above, and the number of traffic classes available for use.
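	 * For example, with num_tcs == RTE_ETH_4_TCS the VT path below uses
	 * 32 VMDq pools, maps one vlan_tags[] entry to each pool, and spreads
	 * the 8 user priorities round-robin over TCs 0-3.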
4039af75078fSIntel */ 40401a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 40411a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 40421a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 40431a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 40441a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 4045013af9b6SIntel 4046547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 40471a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 40481a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 40491a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 4050295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 40511a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 4052295968d1SFerruh Yigit (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS); 4053013af9b6SIntel 40541a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 40551a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 40561a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 40571a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 40581a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 4059af75078fSIntel } 4060295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4061f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 4062f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 4063013af9b6SIntel } 4064013af9b6SIntel 4065013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 4066f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4067f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4068295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB); 4069295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 40701a572499SJingjing Wu } else { 40711a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 40721a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 40731a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 40741a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 4075013af9b6SIntel 40765139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 40775139bc12STing Xu 4078ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 4079ac7c491cSKonstantin Ananyev if (rc != 0) 4080ac7c491cSKonstantin Ananyev return rc; 4081ac7c491cSKonstantin Ananyev 40821a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 40831a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 40841a572499SJingjing Wu 4085295968d1SFerruh Yigit for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { 4086bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 4087bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 4088013af9b6SIntel } 4089ac7c491cSKonstantin Ananyev 4090f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 4091f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 4092295968d1SFerruh Yigit (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS); 4093ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 4094295968d1SFerruh Yigit eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB; 40951a572499SJingjing Wu } 40961a572499SJingjing Wu 40971a572499SJingjing Wu if (pfc_en) 40981a572499SJingjing Wu eth_conf->dcb_capability_en = 4099295968d1SFerruh Yigit RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT; 4100013af9b6SIntel else 4101295968d1SFerruh Yigit eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT; 4102013af9b6SIntel 4103013af9b6SIntel return 0; 4104013af9b6SIntel } 4105013af9b6SIntel 4106013af9b6SIntel int 41071a572499SJingjing Wu 
init_port_dcb_config(portid_t pid,
41081a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
41091a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
41101a572499SJingjing Wu 		     uint8_t pfc_en)
4111013af9b6SIntel {
4112013af9b6SIntel 	struct rte_eth_conf port_conf;
4113013af9b6SIntel 	struct rte_port *rte_port;
4114013af9b6SIntel 	int retval;
4115013af9b6SIntel 	uint16_t i;
4116013af9b6SIntel 
4117a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4118a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support DCB.\n");
4119a550baf2SMin Hu (Connor) 		return -ENOTSUP;
4120a550baf2SMin Hu (Connor) 	}
41212a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4122013af9b6SIntel 
4123c1ba6c32SHuisong Li 	/* Retain the original device configuration. */
4124c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4125d5354e89SYanglong Wu 
4126013af9b6SIntel 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
4127ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4128013af9b6SIntel 	if (retval < 0)
4129013af9b6SIntel 		return retval;
4130295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4131cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in VT mode */
4132cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4133cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4134cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
41353c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4136cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4137cbe70fdeSJie Wang 	}
4138013af9b6SIntel 
41392f203d44SQi Zhang 	/* Re-configure the device. */
41402b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
41412b0e0ebaSChenbo Xia 	if (retval < 0)
41422b0e0ebaSChenbo Xia 		return retval;
41436f51deb9SIvan Ilchenko 
41446f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
41456f51deb9SIvan Ilchenko 	if (retval != 0)
41466f51deb9SIvan Ilchenko 		return retval;
41472a977b89SWenzhuo Lu 
41482a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
41492a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
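	 * testpmd cannot drive such a split layout in VT mode, so this
	 * case is rejected just below.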
41502a977b89SWenzhuo Lu 	 */
41512a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
41522a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
415361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
415461a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
415561a3b0e5SAndrew Rybchenko 			pid);
41562a977b89SWenzhuo Lu 		return -1;
41572a977b89SWenzhuo Lu 	}
41582a977b89SWenzhuo Lu 
41592a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same DCB capability
41602a977b89SWenzhuo Lu 	 * and the same number of Rx and Tx queues in DCB mode
41612a977b89SWenzhuo Lu 	 */
41622a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
416386ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
416486ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
416586ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
416686ef65eeSBernard Iremonger 		} else {
41672a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
41682a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
416986ef65eeSBernard Iremonger 		}
41702a977b89SWenzhuo Lu 	} else {
41712a977b89SWenzhuo Lu 		/* If VT is disabled, use all PF queues */
41722a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
41732a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
41742a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
41752a977b89SWenzhuo Lu 		} else {
41762a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
41772a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
41782a977b89SWenzhuo Lu 
41792a977b89SWenzhuo Lu 		}
41802a977b89SWenzhuo Lu 	}
41812a977b89SWenzhuo Lu 	rx_free_thresh = 64;
41822a977b89SWenzhuo Lu 
4183013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4184013af9b6SIntel 
4185f4d178c1SXueming Li 	rxtx_port_config(pid);
4186013af9b6SIntel 	/* VLAN filter */
4187295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
41881a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4189013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4190013af9b6SIntel 
4191a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4192a5279d25SIgor Romanov 	if (retval != 0)
4193a5279d25SIgor Romanov 		return retval;
4194a5279d25SIgor Romanov 
41957741e4cfSIntel 	rte_port->dcb_flag = 1;
41967741e4cfSIntel 
4197a690a070SHuisong Li 	/* Enter DCB configuration status */
4198a690a070SHuisong Li 	dcb_config = 1;
4199a690a070SHuisong Li 
4200013af9b6SIntel 	return 0;
4201af75078fSIntel }
4202af75078fSIntel 
4203ffc468ffSTetsuya Mukawa static void
4204ffc468ffSTetsuya Mukawa init_port(void)
4205ffc468ffSTetsuya Mukawa {
42061b9f2746SGregory Etelson 	int i;
42071b9f2746SGregory Etelson 
4208ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports.
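	 * One rte_port entry is allocated per possible port id,
	 * RTE_MAX_ETHPORTS entries in total.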
*/ 4209ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 4210ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 4211ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 4212ffc468ffSTetsuya Mukawa if (ports == NULL) { 4213ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 4214ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 4215ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 4216ffc468ffSTetsuya Mukawa } 4217*236bc417SGregory Etelson for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 4218*236bc417SGregory Etelson ports[i].fwd_mac_swap = 1; 421963b72657SIvan Ilchenko ports[i].xstats_info.allocated = false; 42201b9f2746SGregory Etelson LIST_INIT(&ports[i].flow_tunnel_list); 4221*236bc417SGregory Etelson } 422229841336SPhil Yang /* Initialize ports NUMA structures */ 422329841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 422429841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 422529841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 4226ffc468ffSTetsuya Mukawa } 4227ffc468ffSTetsuya Mukawa 4228d3a274ceSZhihong Wang static void 4229d3a274ceSZhihong Wang force_quit(void) 4230d3a274ceSZhihong Wang { 4231d3a274ceSZhihong Wang pmd_test_exit(); 4232d3a274ceSZhihong Wang prompt_exit(); 4233d3a274ceSZhihong Wang } 4234d3a274ceSZhihong Wang 4235d3a274ceSZhihong Wang static void 4236cfea1f30SPablo de Lara print_stats(void) 4237cfea1f30SPablo de Lara { 4238cfea1f30SPablo de Lara uint8_t i; 4239cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 4240cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 4241cfea1f30SPablo de Lara 4242cfea1f30SPablo de Lara /* Clear screen and move to top left */ 4243cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 4244cfea1f30SPablo de Lara 4245cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 4246cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 4247cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 4248683d1e82SIgor Romanov 4249683d1e82SIgor Romanov fflush(stdout); 4250cfea1f30SPablo de Lara } 4251cfea1f30SPablo de Lara 4252cfea1f30SPablo de Lara static void 4253d3a274ceSZhihong Wang signal_handler(int signum) 4254d3a274ceSZhihong Wang { 4255d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 425661a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSignal %d received, preparing to exit...\n", 4257d3a274ceSZhihong Wang signum); 4258a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 4259102b7329SReshma Pattan /* uninitialize packet capture framework */ 4260102b7329SReshma Pattan rte_pdump_uninit(); 4261102b7329SReshma Pattan #endif 4262a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 42638b36297dSAmit Gupta if (latencystats_enabled != 0) 426462d3216dSReshma Pattan rte_latencystats_uninit(); 426562d3216dSReshma Pattan #endif 4266d3a274ceSZhihong Wang force_quit(); 4267d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
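		 * It is checked by the stats-period loop in main(), which
		 * then breaks out and lets the process exit.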
*/ 4268d9a191a0SPhil Yang f_quit = 1; 4269d3a274ceSZhihong Wang /* exit with the expected status */ 4270761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 4271d3a274ceSZhihong Wang signal(signum, SIG_DFL); 4272d3a274ceSZhihong Wang kill(getpid(), signum); 4273761f7ae1SJie Zhou #endif 4274d3a274ceSZhihong Wang } 4275d3a274ceSZhihong Wang } 4276d3a274ceSZhihong Wang 4277af75078fSIntel int 4278af75078fSIntel main(int argc, char** argv) 4279af75078fSIntel { 4280af75078fSIntel int diag; 4281f8244c63SZhiyong Yang portid_t port_id; 42824918a357SXiaoyun Li uint16_t count; 4283fb73e096SJeff Guo int ret; 4284af75078fSIntel 4285d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 4286d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 4287d3a274ceSZhihong Wang 4288285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 4289285fd101SOlivier Matz if (testpmd_logtype < 0) 429016267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register log type"); 4291285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 4292285fd101SOlivier Matz 42939201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 42949201806eSStephen Hemminger if (diag < 0) 429516267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", 429616267ceeSStephen Hemminger rte_strerror(rte_errno)); 42979201806eSStephen Hemminger 429897b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 429997b5d8b5SThomas Monjalon if (ret != 0) 430016267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); 430197b5d8b5SThomas Monjalon 4302a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP 43034aa0d012SAnatoly Burakov /* initialize packet capture framework */ 4304e9436f54STiwei Bie rte_pdump_init(); 43054aa0d012SAnatoly Burakov #endif 43064aa0d012SAnatoly Burakov 43074918a357SXiaoyun Li count = 0; 43084918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 43094918a357SXiaoyun Li ports_ids[count] = port_id; 43104918a357SXiaoyun Li count++; 43114918a357SXiaoyun Li } 43124918a357SXiaoyun Li nb_ports = (portid_t) count; 43134aa0d012SAnatoly Burakov if (nb_ports == 0) 43144aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 43154aa0d012SAnatoly Burakov 43164aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 43174aa0d012SAnatoly Burakov init_port(); 43184aa0d012SAnatoly Burakov 43194aa0d012SAnatoly Burakov set_def_fwd_config(); 43204aa0d012SAnatoly Burakov if (nb_lcores == 0) 432116267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" 432216267ceeSStephen Hemminger "Check the core mask argument\n"); 43234aa0d012SAnatoly Burakov 4324e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 4325a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4326e505d84cSAnatoly Burakov bitrate_enabled = 0; 4327e505d84cSAnatoly Burakov #endif 4328a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 4329e505d84cSAnatoly Burakov latencystats_enabled = 0; 4330e505d84cSAnatoly Burakov #endif 4331e505d84cSAnatoly Burakov 4332fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 43335fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 4334fb7b8b32SAnatoly Burakov do_mlockall = 0; 4335fb7b8b32SAnatoly Burakov #else 4336fb7b8b32SAnatoly Burakov do_mlockall = 1; 4337fb7b8b32SAnatoly Burakov #endif 4338fb7b8b32SAnatoly Burakov 4339e505d84cSAnatoly Burakov argc -= diag; 4340e505d84cSAnatoly Burakov argv += diag; 4341e505d84cSAnatoly Burakov if (argc > 1) 4342e505d84cSAnatoly Burakov 
		launch_args_parse(argc, argv);
4343e505d84cSAnatoly Burakov 
4344761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4345e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4346285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
43471c036b16SEelco Chaudron 			strerror(errno));
43481c036b16SEelco Chaudron 	}
4349761f7ae1SJie Zhou #endif
43501c036b16SEelco Chaudron 
435199cabef0SPablo de Lara 	if (tx_first && interactive)
435299cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
435399cabef0SPablo de Lara 				"interactive mode.\n");
43548820cba4SDavid Hunt 
43558820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
435661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
435761a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
43588820cba4SDavid Hunt 		lsc_interrupt = 0;
43598820cba4SDavid Hunt 	}
43608820cba4SDavid Hunt 
43615a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
436261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
436361a3b0e5SAndrew Rybchenko 			"Warning: Either Rx or Tx queues should be non-zero\n");
43645a8fb55cSReshma Pattan 
43655a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
436661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
436761a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent fully testing it.\n",
4368af75078fSIntel 			nb_rxq, nb_txq);
4369af75078fSIntel 
4370af75078fSIntel 	init_config();
4371fb73e096SJeff Guo 
4372fb73e096SJeff Guo 	if (hot_plug) {
43732049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4374fb73e096SJeff Guo 		if (ret) {
43752049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43762049c511SJeff Guo 				"Failed to enable hotplug handling.\n");
4377fb73e096SJeff Guo 			return -1;
4378fb73e096SJeff Guo 		}
4379fb73e096SJeff Guo 
43802049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
43812049c511SJeff Guo 		if (ret) {
43822049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43832049c511SJeff Guo 				"Failed to start device event monitoring.\n");
43842049c511SJeff Guo 			return -1;
43852049c511SJeff Guo 		}
43862049c511SJeff Guo 
43872049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4388cc1bf307SJeff Guo 			dev_event_callback, NULL);
43892049c511SJeff Guo 		if (ret) {
43902049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43912049c511SJeff Guo 				"Failed to register the device event callback\n");
43922049c511SJeff Guo 			return -1;
43932049c511SJeff Guo 		}
4394fb73e096SJeff Guo 	}
4395fb73e096SJeff Guo 
43966937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4397148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
4398af75078fSIntel 
4399ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
440034fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
440134fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
440234fc1051SIvan Ilchenko 		if (ret != 0)
440361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
440461a3b0e5SAndrew Rybchenko 				"Error while enabling promiscuous mode for port %u: %s - ignoring\n",
440534fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
440634fc1051SIvan Ilchenko 	}
4407af75078fSIntel 
4408bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
44097e4441c8SRemy Horton 	/* Init metrics library */
44107e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4411bb9be9a4SDavid Marchand #endif
44127e4441c8SRemy Horton 
4413a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
441462d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
441562d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
441662d3216dSReshma Pattan 		if (ret)
441761a3b0e5SAndrew Rybchenko fprintf(stderr, 441861a3b0e5SAndrew Rybchenko "Warning: latencystats init() returned error %d\n", 441961a3b0e5SAndrew Rybchenko ret); 442061a3b0e5SAndrew Rybchenko fprintf(stderr, "Latencystats running on lcore %d\n", 442162d3216dSReshma Pattan latencystats_lcore_id); 442262d3216dSReshma Pattan } 442362d3216dSReshma Pattan #endif 442462d3216dSReshma Pattan 44257e4441c8SRemy Horton /* Setup bitrate stats */ 4426a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 4427e25e6c70SRemy Horton if (bitrate_enabled != 0) { 44287e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 44297e4441c8SRemy Horton if (bitrate_data == NULL) 4430e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 4431e25e6c70SRemy Horton "Could not allocate bitrate data.\n"); 44327e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 4433e25e6c70SRemy Horton } 44347e4441c8SRemy Horton #endif 4435a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE 4436592ab76fSDavid Marchand if (init_cmdline() != 0) 4437592ab76fSDavid Marchand rte_exit(EXIT_FAILURE, 4438592ab76fSDavid Marchand "Could not initialise cmdline context.\n"); 4439592ab76fSDavid Marchand 444081ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 444181ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 444281ef862bSAllain Legacy 4443ca7feb22SCyril Chemparathy if (interactive == 1) { 4444ca7feb22SCyril Chemparathy if (auto_start) { 4445ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 4446ca7feb22SCyril Chemparathy start_packet_forwarding(0); 4447ca7feb22SCyril Chemparathy } 4448af75078fSIntel prompt(); 44490de738cfSJiayu Hu pmd_test_exit(); 4450ca7feb22SCyril Chemparathy } else 44510d56cb81SThomas Monjalon #endif 44520d56cb81SThomas Monjalon { 4453af75078fSIntel char c; 4454af75078fSIntel int rc; 4455af75078fSIntel 4456d9a191a0SPhil Yang f_quit = 0; 4457d9a191a0SPhil Yang 4458af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 445999cabef0SPablo de Lara start_packet_forwarding(tx_first); 4460cfea1f30SPablo de Lara if (stats_period != 0) { 4461cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 4462cfea1f30SPablo de Lara uint64_t timer_period; 4463cfea1f30SPablo de Lara 4464cfea1f30SPablo de Lara /* Convert to number of cycles */ 4465cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 4466cfea1f30SPablo de Lara 4467d9a191a0SPhil Yang while (f_quit == 0) { 4468cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 4469cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 4470cfea1f30SPablo de Lara 4471cfea1f30SPablo de Lara if (diff_time >= timer_period) { 4472cfea1f30SPablo de Lara print_stats(); 4473cfea1f30SPablo de Lara /* Reset the timer */ 4474cfea1f30SPablo de Lara diff_time = 0; 4475cfea1f30SPablo de Lara } 4476cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 4477cfea1f30SPablo de Lara prev_time = cur_time; 4478761f7ae1SJie Zhou rte_delay_us_sleep(US_PER_S); 4479cfea1f30SPablo de Lara } 4480cfea1f30SPablo de Lara } 4481cfea1f30SPablo de Lara 4482af75078fSIntel printf("Press enter to exit\n"); 4483af75078fSIntel rc = read(0, &c, 1); 4484d3a274ceSZhihong Wang pmd_test_exit(); 4485af75078fSIntel if (rc < 0) 4486af75078fSIntel return 1; 4487af75078fSIntel } 4488af75078fSIntel 44895e516c89SStephen Hemminger ret = rte_eal_cleanup(); 44905e516c89SStephen Hemminger if (ret != 0) 44915e516c89SStephen Hemminger rte_exit(EXIT_FAILURE, 44925e516c89SStephen Hemminger "EAL cleanup failed: %s\n", strerror(-ret)); 
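	/* All ports were already stopped and closed by pmd_test_exit();
	 * rte_eal_cleanup() has now released hugepages and other EAL
	 * resources as well. */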
44935e516c89SStephen Hemminger 44945e516c89SStephen Hemminger return EXIT_SUCCESS; 4495af75078fSIntel } 4496
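/*
 * Usage sketch (editorial note, not part of the build): a minimal
 * interactive run, assuming two ports are bound to a DPDK-compatible
 * driver and cores 0-3 are available; EAL options vary by system:
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *   testpmd> start tx_first
 *   testpmd> show port stats all
 *   testpmd> quit
 */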