/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif
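/*
 * Illustrative sketch (not part of testpmd): HUGE_FLAG and HUGE_SHIFT are
 * combined with the log2 of the requested page size when calling mmap().
 * For 2M pages (log2 = 21) the result is equivalent to the kernel's
 * MAP_HUGE_2MB:
 *
 *   int flags = MAP_ANONYMOUS | MAP_PRIVATE | HUGE_FLAG | (21 << HUGE_SHIFT);
 *   void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);
 */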
#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
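/*
 * Usage note (hedged example): the *_numa arrays above are filled from
 * command-line options parsed elsewhere, e.g. a layout along the lines of
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- --numa \
 *       --port-numa-config="(0,0),(1,1)" --ring-numa-config="(0,3,0),(1,3,1)"
 *
 * would pin port 0 memory to socket 0 and port 1 memory to socket 1; the
 * exact option syntax lives in the parameter-parsing code, not here.
 */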
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
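/*
 * Usage note (hedged example): several mbuf sizes may be given for the
 * buffer-split case, e.g. "--mbuf-size=2048,4096" would set
 * mbuf_data_size_n = 2 with mbuf_data_size[] = { 2048, 4096 }, one pool
 * per size per NUMA node (hence the mempools[] array above sized by
 * RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT).
 */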
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot be terminated from the outside. Set a flag to exit the stats
 * period loop once SIGINT/SIGTERM is received.
 */
uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
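/*
 * Usage note (hedged example): the txonly segment layout above is driven
 * from the testpmd CLI, e.g. "set txpkts 64,128" would yield
 * tx_pkt_nb_segs = 2, tx_pkt_seg_lengths[] = { 64, 128 } and a total
 * tx_pkt_length of 192 bytes.
 */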
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RX_DESC_DEFAULT 0
#define TX_DESC_DEFAULT 0
uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
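/*
 * A minimal sketch of how the RTE_PMD_PARAM_UNSET values above are meant to
 * be consumed (the real logic lives in the port setup code, not here): a
 * user value only overrides the driver default when explicitly set.
 *
 *   struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *   if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
 *           rxconf.rx_free_thresh = (uint16_t)rx_free_thresh;
 */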
/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */
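/*
 * A sketch (assumed from the ethdev API, not code in this section) of how
 * the two interrupt flags above reach the device: they are copied into the
 * port configuration before rte_eth_dev_configure() when the PMD advertises
 * the corresponding capability.
 *
 *   port->dev_conf.intr_conf.lsc = lsc_interrupt;
 *   port->dev_conf.intr_conf.rmv = rmv_interrupt;
 */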
uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint32_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;
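/*
 * A sketch of the locking this flag requests (performed in main(), outside
 * this section), assuming the POSIX mlockall() semantics:
 *
 *   #include <sys/mman.h>
 *   if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *           TESTPMD_LOG(NOTICE, "mlockall() failed: %s\n", strerror(errno));
 */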
#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
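/*
 * Illustrative arithmetic (a simplification, not this file's exact logic):
 * with proc_id/num_procs each process polls an equal slice of the queues,
 * e.g. for nb_rxq = 8 and num_procs = 2, process 0 polls queues 0-3 and
 * process 1 polls queues 4-7:
 *
 *   uint16_t q_per_proc = nb_rxq / num_procs;
 *   uint16_t first_q = proc_id * q_per_proc;
 */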
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}

static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t slave_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_slaves;
	portid_t slave_pid;
	int i;

	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
					     RTE_MAX_ETHPORTS);
	if (num_slaves < 0) {
		fprintf(stderr, "Failed to get slave list for port = %u\n",
			bond_pid);
		return num_slaves;
	}

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		port = &ports[slave_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonded port also starts all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, false);
	}

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonded port also stops all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, true);
	}

	return 0;
}
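/*
 * Design note: the *_mp() wrappers in this block gate control-path ethdev
 * calls on is_proc_primary(), so a secondary process attached to the same
 * ports reuses the primary's configuration instead of repeating (and racing
 * on) device configure/start/stop.
 */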
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is newly discovered.
 * If yes, return positive value. If it is already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;
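	/*
	 * Illustrative arithmetic (hypothetical numbers): if obj_sz below
	 * works out to 2560 bytes and pgsz is 2M, one page fits
	 * 2097152 / 2560 = 819 mbufs, so 100000 mbufs need
	 * ceil(100000 / 819) = 123 pages (246MB of mbuf memory) on top of
	 * the 128MB header allowance, rounded up to a page multiple.
	 */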
	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
		    param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

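	/*
	 * Worked example for the computation below (assumed typical values,
	 * 64-byte cache lines): for mbuf_sz = 2048 the aligned elt_size
	 * stays 2048, one zone of EXTBUF_ZONE_SIZE = 2096896 bytes holds
	 * 2096896 / 2048 = 1023 elements, so 100000 mbufs need
	 * ceil(100000 / 1023) = 98 zones.
	 */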
	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
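/*
 * Usage note (hedged): which allocation path mbuf_pool_create() below takes
 * is selected with the "--mp-alloc=native|anon|xmem|xmemhuge|xbuf" testpmd
 * option; MP_ALLOC_XBUF routes through setup_extbuf() above to place mbuf
 * data in pinned external memzones.
 */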
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
	{
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
		break;
	}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
	{
		int heap_socket;
		bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

		if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
			rte_exit(EXIT_FAILURE, "Could not create external memory\n");

		heap_socket =
			rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1202c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1203c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1204c7f5dba7SAnatoly Burakov 12050e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 12060e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1207ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1208c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1209c7f5dba7SAnatoly Burakov heap_socket); 1210c7f5dba7SAnatoly Burakov break; 1211c7f5dba7SAnatoly Burakov } 1212761f7ae1SJie Zhou #endif 121372512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 121472512e18SViacheslav Ovsiienko { 121572512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 121672512e18SViacheslav Ovsiienko unsigned int ext_num; 121772512e18SViacheslav Ovsiienko 121872512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 121972512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 122072512e18SViacheslav Ovsiienko if (ext_num == 0) 122172512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 122272512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 122372512e18SViacheslav Ovsiienko 122472512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 122572512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 122672512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 122772512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 122872512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 122972512e18SViacheslav Ovsiienko ext_mem, ext_num); 123072512e18SViacheslav Ovsiienko free(ext_mem); 123172512e18SViacheslav Ovsiienko break; 123272512e18SViacheslav Ovsiienko } 1233c7f5dba7SAnatoly Burakov default: 1234c7f5dba7SAnatoly Burakov { 1235c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1236c7f5dba7SAnatoly Burakov } 1237bece7b6cSChristian Ehrhardt } 1238148f963fSBruce Richardson 1239761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 124024427bb9SOlivier Matz err: 1241761f7ae1SJie Zhou #endif 1242af75078fSIntel if (rte_mp == NULL) { 1243d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1244d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1245d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1246148f963fSBruce Richardson } else if (verbose_level > 0) { 1247591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1248af75078fSIntel } 1249401b744dSShahaf Shuler return rte_mp; 1250af75078fSIntel } 1251af75078fSIntel 125220a0286fSLiu Xiaofeng /* 125320a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 125420a0286fSLiu Xiaofeng * if valid, return 0, else return -1 125520a0286fSLiu Xiaofeng */ 125620a0286fSLiu Xiaofeng static int 125720a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 125820a0286fSLiu Xiaofeng { 125920a0286fSLiu Xiaofeng static int warning_once = 0; 126020a0286fSLiu Xiaofeng 1261c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 126220a0286fSLiu Xiaofeng if (!warning_once && numa_support) 126361a3b0e5SAndrew Rybchenko fprintf(stderr, 126461a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 126520a0286fSLiu Xiaofeng warning_once = 1; 126620a0286fSLiu Xiaofeng return -1; 126720a0286fSLiu Xiaofeng } 126820a0286fSLiu Xiaofeng return 0; 126920a0286fSLiu 
Xiaofeng } 127020a0286fSLiu Xiaofeng 12713f7311baSWei Dai /* 12723f7311baSWei Dai * Get the allowed maximum number of RX queues. 12733f7311baSWei Dai * *pid return the port id which has minimal value of 12743f7311baSWei Dai * max_rx_queues in all ports. 12753f7311baSWei Dai */ 12763f7311baSWei Dai queueid_t 12773f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 12783f7311baSWei Dai { 12799e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; 12806f51deb9SIvan Ilchenko bool max_rxq_valid = false; 12813f7311baSWei Dai portid_t pi; 12823f7311baSWei Dai struct rte_eth_dev_info dev_info; 12833f7311baSWei Dai 12843f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 12856f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 12866f51deb9SIvan Ilchenko continue; 12876f51deb9SIvan Ilchenko 12886f51deb9SIvan Ilchenko max_rxq_valid = true; 12893f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 12903f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 12913f7311baSWei Dai *pid = pi; 12923f7311baSWei Dai } 12933f7311baSWei Dai } 12946f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0; 12953f7311baSWei Dai } 12963f7311baSWei Dai 12973f7311baSWei Dai /* 12983f7311baSWei Dai * Check input rxq is valid or not. 12993f7311baSWei Dai * If input rxq is not greater than any of maximum number 13003f7311baSWei Dai * of RX queues of all ports, it is valid. 13013f7311baSWei Dai * if valid, return 0, else return -1 13023f7311baSWei Dai */ 13033f7311baSWei Dai int 13043f7311baSWei Dai check_nb_rxq(queueid_t rxq) 13053f7311baSWei Dai { 13063f7311baSWei Dai queueid_t allowed_max_rxq; 13073f7311baSWei Dai portid_t pid = 0; 13083f7311baSWei Dai 13093f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 13103f7311baSWei Dai if (rxq > allowed_max_rxq) { 131161a3b0e5SAndrew Rybchenko fprintf(stderr, 131261a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n", 131361a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid); 13143f7311baSWei Dai return -1; 13153f7311baSWei Dai } 13163f7311baSWei Dai return 0; 13173f7311baSWei Dai } 13183f7311baSWei Dai 131936db4f6cSWei Dai /* 132036db4f6cSWei Dai * Get the allowed maximum number of TX queues. 132136db4f6cSWei Dai * *pid return the port id which has minimal value of 132236db4f6cSWei Dai * max_tx_queues in all ports. 132336db4f6cSWei Dai */ 132436db4f6cSWei Dai queueid_t 132536db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 132636db4f6cSWei Dai { 13279e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; 13286f51deb9SIvan Ilchenko bool max_txq_valid = false; 132936db4f6cSWei Dai portid_t pi; 133036db4f6cSWei Dai struct rte_eth_dev_info dev_info; 133136db4f6cSWei Dai 133236db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13336f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13346f51deb9SIvan Ilchenko continue; 13356f51deb9SIvan Ilchenko 13366f51deb9SIvan Ilchenko max_txq_valid = true; 133736db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 133836db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 133936db4f6cSWei Dai *pid = pi; 134036db4f6cSWei Dai } 134136db4f6cSWei Dai } 13426f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0; 134336db4f6cSWei Dai } 134436db4f6cSWei Dai 134536db4f6cSWei Dai /* 134636db4f6cSWei Dai * Check input txq is valid or not. 134736db4f6cSWei Dai * If input txq is not greater than any of maximum number 134836db4f6cSWei Dai * of TX queues of all ports, it is valid. 
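 * For example, with two ports reporting max_tx_queues of 16 and 8,
 * get_allowed_max_nb_txq() returns 8 and only txq values up to 8 are
 * accepted (the numbers are illustrative, not driver defaults).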
134936db4f6cSWei Dai * if valid, return 0, else return -1 135036db4f6cSWei Dai */ 135136db4f6cSWei Dai int 135236db4f6cSWei Dai check_nb_txq(queueid_t txq) 135336db4f6cSWei Dai { 135436db4f6cSWei Dai queueid_t allowed_max_txq; 135536db4f6cSWei Dai portid_t pid = 0; 135636db4f6cSWei Dai 135736db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 135836db4f6cSWei Dai if (txq > allowed_max_txq) { 135961a3b0e5SAndrew Rybchenko fprintf(stderr, 136061a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n", 136161a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid); 136236db4f6cSWei Dai return -1; 136336db4f6cSWei Dai } 136436db4f6cSWei Dai return 0; 136536db4f6cSWei Dai } 136636db4f6cSWei Dai 13671c69df45SOri Kam /* 136899e040d3SLijun Ou * Get the allowed maximum number of RXDs of every rx queue. 136999e040d3SLijun Ou * *pid return the port id which has minimal value of 137099e040d3SLijun Ou * max_rxd in all queues of all ports. 137199e040d3SLijun Ou */ 137299e040d3SLijun Ou static uint16_t 137399e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid) 137499e040d3SLijun Ou { 137599e040d3SLijun Ou uint16_t allowed_max_rxd = UINT16_MAX; 137699e040d3SLijun Ou portid_t pi; 137799e040d3SLijun Ou struct rte_eth_dev_info dev_info; 137899e040d3SLijun Ou 137999e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 138099e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 138199e040d3SLijun Ou continue; 138299e040d3SLijun Ou 138399e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) { 138499e040d3SLijun Ou allowed_max_rxd = dev_info.rx_desc_lim.nb_max; 138599e040d3SLijun Ou *pid = pi; 138699e040d3SLijun Ou } 138799e040d3SLijun Ou } 138899e040d3SLijun Ou return allowed_max_rxd; 138999e040d3SLijun Ou } 139099e040d3SLijun Ou 139199e040d3SLijun Ou /* 139299e040d3SLijun Ou * Get the allowed minimal number of RXDs of every rx queue. 139399e040d3SLijun Ou * *pid return the port id which has minimal value of 139499e040d3SLijun Ou * min_rxd in all queues of all ports. 139599e040d3SLijun Ou */ 139699e040d3SLijun Ou static uint16_t 139799e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid) 139899e040d3SLijun Ou { 139999e040d3SLijun Ou uint16_t allowed_min_rxd = 0; 140099e040d3SLijun Ou portid_t pi; 140199e040d3SLijun Ou struct rte_eth_dev_info dev_info; 140299e040d3SLijun Ou 140399e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 140499e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 140599e040d3SLijun Ou continue; 140699e040d3SLijun Ou 140799e040d3SLijun Ou if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) { 140899e040d3SLijun Ou allowed_min_rxd = dev_info.rx_desc_lim.nb_min; 140999e040d3SLijun Ou *pid = pi; 141099e040d3SLijun Ou } 141199e040d3SLijun Ou } 141299e040d3SLijun Ou 141399e040d3SLijun Ou return allowed_min_rxd; 141499e040d3SLijun Ou } 141599e040d3SLijun Ou 141699e040d3SLijun Ou /* 141799e040d3SLijun Ou * Check input rxd is valid or not. 141899e040d3SLijun Ou * If input rxd is not greater than any of maximum number 141999e040d3SLijun Ou * of RXDs of every Rx queues and is not less than any of 142099e040d3SLijun Ou * minimal number of RXDs of every Rx queues, it is valid. 
142199e040d3SLijun Ou * if valid, return 0, else return -1 142299e040d3SLijun Ou */ 142399e040d3SLijun Ou int 142499e040d3SLijun Ou check_nb_rxd(queueid_t rxd) 142599e040d3SLijun Ou { 142699e040d3SLijun Ou uint16_t allowed_max_rxd; 142799e040d3SLijun Ou uint16_t allowed_min_rxd; 142899e040d3SLijun Ou portid_t pid = 0; 142999e040d3SLijun Ou 143099e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid); 143199e040d3SLijun Ou if (rxd > allowed_max_rxd) { 143261a3b0e5SAndrew Rybchenko fprintf(stderr, 143361a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n", 143461a3b0e5SAndrew Rybchenko rxd, allowed_max_rxd, pid); 143599e040d3SLijun Ou return -1; 143699e040d3SLijun Ou } 143799e040d3SLijun Ou 143899e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid); 143999e040d3SLijun Ou if (rxd < allowed_min_rxd) { 144061a3b0e5SAndrew Rybchenko fprintf(stderr, 144161a3b0e5SAndrew Rybchenko "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n", 144261a3b0e5SAndrew Rybchenko rxd, allowed_min_rxd, pid); 144399e040d3SLijun Ou return -1; 144499e040d3SLijun Ou } 144599e040d3SLijun Ou 144699e040d3SLijun Ou return 0; 144799e040d3SLijun Ou } 144899e040d3SLijun Ou 144999e040d3SLijun Ou /* 145099e040d3SLijun Ou * Get the allowed maximum number of TXDs of every tx queue. 145199e040d3SLijun Ou * *pid return the port id which has minimal value of 145299e040d3SLijun Ou * max_txd in every tx queue. 145399e040d3SLijun Ou */ 145499e040d3SLijun Ou static uint16_t 145599e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid) 145699e040d3SLijun Ou { 145799e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX; 145899e040d3SLijun Ou portid_t pi; 145999e040d3SLijun Ou struct rte_eth_dev_info dev_info; 146099e040d3SLijun Ou 146199e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 146299e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 146399e040d3SLijun Ou continue; 146499e040d3SLijun Ou 146599e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) { 146699e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max; 146799e040d3SLijun Ou *pid = pi; 146899e040d3SLijun Ou } 146999e040d3SLijun Ou } 147099e040d3SLijun Ou return allowed_max_txd; 147199e040d3SLijun Ou } 147299e040d3SLijun Ou 147399e040d3SLijun Ou /* 147499e040d3SLijun Ou * Get the allowed minimal number of TXDs of every tx queue. 147599e040d3SLijun Ou * *pid return the port id which has minimal value of 147699e040d3SLijun Ou * min_txd in every tx queue. 147799e040d3SLijun Ou */ 147899e040d3SLijun Ou static uint16_t 147999e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid) 148099e040d3SLijun Ou { 148199e040d3SLijun Ou uint16_t allowed_min_txd = 0; 148299e040d3SLijun Ou portid_t pi; 148399e040d3SLijun Ou struct rte_eth_dev_info dev_info; 148499e040d3SLijun Ou 148599e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) { 148699e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 148799e040d3SLijun Ou continue; 148899e040d3SLijun Ou 148999e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) { 149099e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min; 149199e040d3SLijun Ou *pid = pi; 149299e040d3SLijun Ou } 149399e040d3SLijun Ou } 149499e040d3SLijun Ou 149599e040d3SLijun Ou return allowed_min_txd; 149699e040d3SLijun Ou } 149799e040d3SLijun Ou 149899e040d3SLijun Ou /* 149999e040d3SLijun Ou * Check input txd is valid or not. 150099e040d3SLijun Ou * If input txd is not greater than any of maximum number 150199e040d3SLijun Ou * of TXDs of every Tx queue, it is valid. 150299e040d3SLijun Ou * if valid, return 0, else return -1 150399e040d3SLijun Ou */ 150499e040d3SLijun Ou int 150599e040d3SLijun Ou check_nb_txd(queueid_t txd) 150699e040d3SLijun Ou { 150799e040d3SLijun Ou uint16_t allowed_max_txd; 150899e040d3SLijun Ou uint16_t allowed_min_txd; 150999e040d3SLijun Ou portid_t pid = 0; 151099e040d3SLijun Ou 151199e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid); 151299e040d3SLijun Ou if (txd > allowed_max_txd) { 151361a3b0e5SAndrew Rybchenko fprintf(stderr, 151461a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n", 151561a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid); 151699e040d3SLijun Ou return -1; 151799e040d3SLijun Ou } 151899e040d3SLijun Ou 151999e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid); 152099e040d3SLijun Ou if (txd < allowed_min_txd) { 152161a3b0e5SAndrew Rybchenko fprintf(stderr, 152261a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n", 152361a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid); 152499e040d3SLijun Ou return -1; 152599e040d3SLijun Ou } 152699e040d3SLijun Ou return 0; 152799e040d3SLijun Ou } 152899e040d3SLijun Ou 152999e040d3SLijun Ou 153099e040d3SLijun Ou /* 15311c69df45SOri Kam * Get the allowed maximum number of hairpin queues. 15321c69df45SOri Kam * *pid return the port id which has minimal value of 15331c69df45SOri Kam * max_hairpin_queues in all ports. 15341c69df45SOri Kam */ 15351c69df45SOri Kam queueid_t 15361c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid) 15371c69df45SOri Kam { 15389e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; 15391c69df45SOri Kam portid_t pi; 15401c69df45SOri Kam struct rte_eth_hairpin_cap cap; 15411c69df45SOri Kam 15421c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) { 15431c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { 15441c69df45SOri Kam *pid = pi; 15451c69df45SOri Kam return 0; 15461c69df45SOri Kam } 15471c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) { 15481c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues; 15491c69df45SOri Kam *pid = pi; 15501c69df45SOri Kam } 15511c69df45SOri Kam } 15521c69df45SOri Kam return allowed_max_hairpinq; 15531c69df45SOri Kam } 15541c69df45SOri Kam 15551c69df45SOri Kam /* 15561c69df45SOri Kam * Check input hairpin is valid or not. 15571c69df45SOri Kam * If input hairpin is not greater than any of maximum number 15581c69df45SOri Kam * of hairpin queues of all ports, it is valid.
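 * The per-port limit is taken from rte_eth_dev_hairpin_capability_get();
 * a standalone query would look like this sketch (port_id assumed to be
 * a valid started port):
 *
 *	struct rte_eth_hairpin_cap cap;
 *
 *	if (rte_eth_dev_hairpin_capability_get(port_id, &cap) == 0)
 *		printf("port %u: up to %u hairpin queues\n",
 *		       port_id, cap.max_nb_queues);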
15591c69df45SOri Kam * if valid, return 0, else return -1 15601c69df45SOri Kam */ 15611c69df45SOri Kam int 15621c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15631c69df45SOri Kam { 15641c69df45SOri Kam queueid_t allowed_max_hairpinq; 15651c69df45SOri Kam portid_t pid = 0; 15661c69df45SOri Kam 15671c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15681c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 156961a3b0e5SAndrew Rybchenko fprintf(stderr, 157061a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 15711c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 15721c69df45SOri Kam return -1; 15731c69df45SOri Kam } 15741c69df45SOri Kam return 0; 15751c69df45SOri Kam } 15761c69df45SOri Kam 15771bb4a528SFerruh Yigit static int 15781bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 15791bb4a528SFerruh Yigit { 15801bb4a528SFerruh Yigit uint32_t eth_overhead; 15811bb4a528SFerruh Yigit 15821bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 15831bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 15841bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 15851bb4a528SFerruh Yigit else 15861bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 15871bb4a528SFerruh Yigit 15881bb4a528SFerruh Yigit return eth_overhead; 15891bb4a528SFerruh Yigit } 15901bb4a528SFerruh Yigit 1591af75078fSIntel static void 1592b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1593b6b8a1ebSViacheslav Ovsiienko { 1594b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1595b6b8a1ebSViacheslav Ovsiienko int ret; 1596b6b8a1ebSViacheslav Ovsiienko int i; 1597b6b8a1ebSViacheslav Ovsiienko 1598f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 1599f6d8a6d3SIvan Malov 1600b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1601b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1602b6b8a1ebSViacheslav Ovsiienko 1603b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1604b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1605b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1606b6b8a1ebSViacheslav Ovsiienko 1607295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1608b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1609295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1610b6b8a1ebSViacheslav Ovsiienko 1611b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1612b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 16133c4426dbSDmitry Kozlyuk port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads; 1614b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1615b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 16163c4426dbSDmitry Kozlyuk port->txq[i].conf.offloads = port->dev_conf.txmode.offloads; 1617b6b8a1ebSViacheslav Ovsiienko 1618b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1619b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1620b6b8a1ebSViacheslav Ovsiienko 16211bb4a528SFerruh Yigit if (max_rx_pkt_len) 16221bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16231bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 16241bb4a528SFerruh Yigit 1625b6b8a1ebSViacheslav Ovsiienko /* set flag to 
initialize port/queue */ 1626b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1627b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1628b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1629b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1630b6b8a1ebSViacheslav Ovsiienko 1631b6b8a1ebSViacheslav Ovsiienko /* 1632b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1633b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1634b6b8a1ebSViacheslav Ovsiienko */ 1635b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1636b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16371bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16381bb4a528SFerruh Yigit uint16_t mtu; 1639b6b8a1ebSViacheslav Ovsiienko 16401bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16411bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16421bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16431bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16441bb4a528SFerruh Yigit 16451bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16461bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1647b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1648b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1649b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1650b6b8a1ebSViacheslav Ovsiienko } 1651b6b8a1ebSViacheslav Ovsiienko } 1652b6b8a1ebSViacheslav Ovsiienko } 16531bb4a528SFerruh Yigit } 1654b6b8a1ebSViacheslav Ovsiienko 1655b6b8a1ebSViacheslav Ovsiienko static void 1656af75078fSIntel init_config(void) 1657af75078fSIntel { 1658ce8d5614SIntel portid_t pid; 1659af75078fSIntel struct rte_mempool *mbp; 1660af75078fSIntel unsigned int nb_mbuf_per_pool; 1661af75078fSIntel lcoreid_t lc_id; 16626970401eSDavid Marchand #ifdef RTE_LIB_GRO 1663b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16646970401eSDavid Marchand #endif 16656970401eSDavid Marchand #ifdef RTE_LIB_GSO 166652f38a20SJiayu Hu uint32_t gso_types; 16676970401eSDavid Marchand #endif 1668487f9a59SYulong Pei 1669af75078fSIntel /* Configuration of logical cores. 
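 * One struct fwd_lcore is allocated below for each forwarding lcore,
 * and cpuid_idx records the lcore's index in the array. An illustrative
 * sketch of how this per-lcore state is reached later, assuming the
 * current_fwd_lcore() helper that testpmd.h provides for this purpose:
 *
 *	struct fwd_lcore *fc = current_fwd_lcore();
 *	struct rte_mempool *mbp = fc->mbp;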
*/ 1670af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1671af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1672fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1673af75078fSIntel if (fwd_lcores == NULL) { 1674ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1675ce8d5614SIntel "failed\n", nb_lcores); 1676af75078fSIntel } 1677af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1678af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1679af75078fSIntel sizeof(struct fwd_lcore), 1680fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1681af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1682ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1683ce8d5614SIntel "failed\n"); 1684af75078fSIntel } 1685af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1686af75078fSIntel } 1687af75078fSIntel 16887d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1689b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 16906f51deb9SIvan Ilchenko 1691b6ea6408SIntel if (numa_support) { 1692b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1693b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1694b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 169520a0286fSLiu Xiaofeng 169629841336SPhil Yang /* 169729841336SPhil Yang * if socket_id is invalid, 169829841336SPhil Yang * set to the first available socket. 169929841336SPhil Yang */ 170020a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 170129841336SPhil Yang socket_id = socket_ids[0]; 1702b6ea6408SIntel } 1703b6b8a1ebSViacheslav Ovsiienko } else { 1704b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1705b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1706b6ea6408SIntel } 1707b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1708b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1709ce8d5614SIntel } 17103ab64341SOlivier Matz /* 17113ab64341SOlivier Matz * Create pools of mbuf. 17123ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 17133ab64341SOlivier Matz * socket 0 memory by default. 17143ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 17153ab64341SOlivier Matz * 17163ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 17173ab64341SOlivier Matz * nb_txd can be configured at run time. 
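 * As a sizing illustration only (the actual values are testpmd's
 * compile-time defaults): assuming RX_DESC_MAX = 2048, TX_DESC_MAX =
 * 2048, MAX_PKT_BURST = 512, 4 lcores and mb_mempool_cache = 250, the
 * default branch below yields 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs,
 * which is then multiplied by RTE_MAX_ETHPORTS.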
17183ab64341SOlivier Matz */ 17193ab64341SOlivier Matz if (param_total_num_mbufs) 17203ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17213ab64341SOlivier Matz else { 17224ed89049SDavid Marchand nb_mbuf_per_pool = RX_DESC_MAX + 17233ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17244ed89049SDavid Marchand TX_DESC_MAX + MAX_PKT_BURST; 17253ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17263ab64341SOlivier Matz } 17273ab64341SOlivier Matz 1728b6ea6408SIntel if (numa_support) { 172926cbb419SViacheslav Ovsiienko uint8_t i, j; 1730ce8d5614SIntel 1731c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 173226cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 173326cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 173426cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1735401b744dSShahaf Shuler nb_mbuf_per_pool, 173626cbb419SViacheslav Ovsiienko socket_ids[i], j); 17373ab64341SOlivier Matz } else { 173826cbb419SViacheslav Ovsiienko uint8_t i; 173926cbb419SViacheslav Ovsiienko 174026cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 174126cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 174226cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1743401b744dSShahaf Shuler nb_mbuf_per_pool, 174426cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 174526cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17463ab64341SOlivier Matz } 1747b6ea6408SIntel 1748b6ea6408SIntel init_port_config(); 17495886ae07SAdrien Mazarguil 17506970401eSDavid Marchand #ifdef RTE_LIB_GSO 1751295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1752295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17536970401eSDavid Marchand #endif 17545886ae07SAdrien Mazarguil /* 17555886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
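 * The lookup below prefers the pool on the lcore's own NUMA socket and
 * falls back to the socket 0 pool. A forwarding engine then allocates
 * from its lcore's pool, e.g.:
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(fwd_lcores[lc_id]->mbp);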
17565886ae07SAdrien Mazarguil */ 17575886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17588fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 175926cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17608fd8bebcSAdrien Mazarguil 17615886ae07SAdrien Mazarguil if (mbp == NULL) 176226cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17635886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17646970401eSDavid Marchand #ifdef RTE_LIB_GSO 176552f38a20SJiayu Hu /* initialize GSO context */ 176652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 176752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 176852f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 176935b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 177035b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 177152f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 17726970401eSDavid Marchand #endif 17735886ae07SAdrien Mazarguil } 17745886ae07SAdrien Mazarguil 17750c0db76fSBernard Iremonger fwd_config_setup(); 1776b7091f1dSJiayu Hu 17776970401eSDavid Marchand #ifdef RTE_LIB_GRO 1778b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1779b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1780b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1781b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1782b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1783b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1784b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1785b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1786b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1787b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1788b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1789b7091f1dSJiayu Hu } 1790b7091f1dSJiayu Hu } 17916970401eSDavid Marchand #endif 1792ce8d5614SIntel } 1793ce8d5614SIntel 17942950a769SDeclan Doherty 17952950a769SDeclan Doherty void 1796a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 17972950a769SDeclan Doherty { 17982950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
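 * Invoked for example when a port is attached at run time: it re-applies
 * the default offload configuration to the port and sets the
 * need_reconfig flags so the port is set up again on the next start.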
*/ 1799b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 18002950a769SDeclan Doherty init_port_config(); 18012950a769SDeclan Doherty } 18022950a769SDeclan Doherty 1803ce8d5614SIntel int 1804ce8d5614SIntel init_fwd_streams(void) 1805ce8d5614SIntel { 1806ce8d5614SIntel portid_t pid; 1807ce8d5614SIntel struct rte_port *port; 1808ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 18095a8fb55cSReshma Pattan queueid_t q; 1810ce8d5614SIntel 1811ce8d5614SIntel /* set socket id according to numa or not */ 18127d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1813ce8d5614SIntel port = &ports[pid]; 1814ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 181561a3b0e5SAndrew Rybchenko fprintf(stderr, 181661a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 181761a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1818ce8d5614SIntel return -1; 1819ce8d5614SIntel } 1820ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 182161a3b0e5SAndrew Rybchenko fprintf(stderr, 182261a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 182361a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1824ce8d5614SIntel return -1; 1825ce8d5614SIntel } 182620a0286fSLiu Xiaofeng if (numa_support) { 182720a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 182820a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 182920a0286fSLiu Xiaofeng else { 1830b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 183120a0286fSLiu Xiaofeng 183229841336SPhil Yang /* 183329841336SPhil Yang * if socket_id is invalid, 183429841336SPhil Yang * set to the first available socket. 183529841336SPhil Yang */ 183620a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 183729841336SPhil Yang port->socket_id = socket_ids[0]; 183820a0286fSLiu Xiaofeng } 183920a0286fSLiu Xiaofeng } 1840b6ea6408SIntel else { 1841b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1842af75078fSIntel port->socket_id = 0; 1843b6ea6408SIntel else 1844b6ea6408SIntel port->socket_id = socket_num; 1845b6ea6408SIntel } 1846af75078fSIntel } 1847af75078fSIntel 18485a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18495a8fb55cSReshma Pattan if (q == 0) { 185061a3b0e5SAndrew Rybchenko fprintf(stderr, 185161a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18525a8fb55cSReshma Pattan return -1; 18535a8fb55cSReshma Pattan } 18545a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1855ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1856ce8d5614SIntel return 0; 1857ce8d5614SIntel /* clear the old */ 1858ce8d5614SIntel if (fwd_streams != NULL) { 1859ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1860ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1861ce8d5614SIntel continue; 1862ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1863ce8d5614SIntel fwd_streams[sm_id] = NULL; 1864af75078fSIntel } 1865ce8d5614SIntel rte_free(fwd_streams); 1866ce8d5614SIntel fwd_streams = NULL; 1867ce8d5614SIntel } 1868ce8d5614SIntel 1869ce8d5614SIntel /* init new */ 1870ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 18711f84c469SMatan Azrad if (nb_fwd_streams) { 1872ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 18731f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 18741f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1875ce8d5614SIntel if (fwd_streams == NULL) 18761f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 18771f84c469SMatan Azrad " 
(struct fwd_stream *)) failed\n", 18781f84c469SMatan Azrad nb_fwd_streams); 1879ce8d5614SIntel 1880af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 18811f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 18821f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 18831f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1884ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 18851f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 18861f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 18871f84c469SMatan Azrad } 1888af75078fSIntel } 1889ce8d5614SIntel 1890ce8d5614SIntel return 0; 1891af75078fSIntel } 1892af75078fSIntel 1893af75078fSIntel static void 1894af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1895af75078fSIntel { 18967569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 189785de481aSHonnappa Nagarahalli uint64_t nb_burst; 18987569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 18997569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1900af75078fSIntel uint16_t nb_pkt; 19017569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 19027569b8c1SHonnappa Nagarahalli int i; 1903af75078fSIntel 1904af75078fSIntel /* 1905af75078fSIntel * First compute the total number of packet bursts and the 1906af75078fSIntel * two highest numbers of bursts of the same number of packets. 1907af75078fSIntel */ 19087569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 19097569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 19107569b8c1SHonnappa Nagarahalli 19117569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 19127569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 19137569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 19147569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 19157569b8c1SHonnappa Nagarahalli 19167569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. 
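 * Illustrative example: with 20 empty polls, 900 bursts of 32 packets
 * and 80 bursts of 16 packets recorded, total_burst is 1000 and the
 * loop below prints [2% of 0 pkts + 90% of 32 pkts + 8% of 16 pkts].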
*/ 19176a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1918af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 19197569b8c1SHonnappa Nagarahalli 1920af75078fSIntel if (nb_burst == 0) 1921af75078fSIntel continue; 19227569b8c1SHonnappa Nagarahalli 1923af75078fSIntel total_burst += nb_burst; 19247569b8c1SHonnappa Nagarahalli 19257569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19267569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19277569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1928fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1929fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19307569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19317569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19327569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1933af75078fSIntel } 1934af75078fSIntel } 1935af75078fSIntel if (total_burst == 0) 1936af75078fSIntel return; 19377569b8c1SHonnappa Nagarahalli 19387569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19397569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19407569b8c1SHonnappa Nagarahalli if (i == 3) { 19417569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1942af75078fSIntel return; 1943af75078fSIntel } 19447569b8c1SHonnappa Nagarahalli 19457569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19467569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19477569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19487569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1949af75078fSIntel return; 1950af75078fSIntel } 19517569b8c1SHonnappa Nagarahalli 19527569b8c1SHonnappa Nagarahalli burst_percent[i] = 19537569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19547569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19557569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19567569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1957af75078fSIntel } 1958af75078fSIntel } 1959af75078fSIntel 1960af75078fSIntel static void 1961af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1962af75078fSIntel { 1963af75078fSIntel struct fwd_stream *fs; 1964af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1965af75078fSIntel 1966af75078fSIntel fs = fwd_streams[stream_id]; 1967af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1968af75078fSIntel (fs->fwd_dropped == 0)) 1969af75078fSIntel return; 1970af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1971af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1972af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1973af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1974c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1975c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1976af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1977af75078fSIntel 1978af75078fSIntel /* if checksum mode */ 1979af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1980c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1981c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1982c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 198358d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 198458d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 1985d139cf23SLance Richardson printf(" RX- bad outer IP checksum: 
%-14"PRIu64"\n", 1986d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 198794d65546SDavid Marchand } else { 198894d65546SDavid Marchand printf("\n"); 1989af75078fSIntel } 1990af75078fSIntel 19910e4b1963SDharmik Thakkar if (record_burst_stats) { 1992af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1993af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 19940e4b1963SDharmik Thakkar } 1995af75078fSIntel } 1996af75078fSIntel 199753324971SDavid Marchand void 199853324971SDavid Marchand fwd_stats_display(void) 199953324971SDavid Marchand { 200053324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 200153324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 200253324971SDavid Marchand struct { 200353324971SDavid Marchand struct fwd_stream *rx_stream; 200453324971SDavid Marchand struct fwd_stream *tx_stream; 200553324971SDavid Marchand uint64_t tx_dropped; 200653324971SDavid Marchand uint64_t rx_bad_ip_csum; 200753324971SDavid Marchand uint64_t rx_bad_l4_csum; 200853324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 2009d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 201053324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 201153324971SDavid Marchand uint64_t total_rx_dropped = 0; 201253324971SDavid Marchand uint64_t total_tx_dropped = 0; 201353324971SDavid Marchand uint64_t total_rx_nombuf = 0; 201453324971SDavid Marchand struct rte_eth_stats stats; 201553324971SDavid Marchand uint64_t fwd_cycles = 0; 201653324971SDavid Marchand uint64_t total_recv = 0; 201753324971SDavid Marchand uint64_t total_xmit = 0; 201853324971SDavid Marchand struct rte_port *port; 201953324971SDavid Marchand streamid_t sm_id; 202053324971SDavid Marchand portid_t pt_id; 2021baef6bbfSMin Hu (Connor) int ret; 202253324971SDavid Marchand int i; 202353324971SDavid Marchand 202453324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 202553324971SDavid Marchand 202653324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 202753324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 202853324971SDavid Marchand 202953324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 203053324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 203153324971SDavid Marchand fwd_stream_stats_display(sm_id); 203253324971SDavid Marchand } else { 203353324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 203453324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 203553324971SDavid Marchand } 203653324971SDavid Marchand 203753324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 203853324971SDavid Marchand 203953324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 204053324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 204153324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 204253324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2043d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2044d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 204553324971SDavid Marchand 2046bc700b67SDharmik Thakkar if (record_core_cycles) 204753324971SDavid Marchand fwd_cycles += fs->core_cycles; 204853324971SDavid Marchand } 204953324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 205053324971SDavid Marchand pt_id = fwd_ports_ids[i]; 205153324971SDavid Marchand port = &ports[pt_id]; 205253324971SDavid Marchand 2053baef6bbfSMin Hu 
(Connor) ret = rte_eth_stats_get(pt_id, &stats); 2054baef6bbfSMin Hu (Connor) if (ret != 0) { 2055baef6bbfSMin Hu (Connor) fprintf(stderr, 2056baef6bbfSMin Hu (Connor) "%s: Error: failed to get stats (port %u): %d", 2057baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 2058baef6bbfSMin Hu (Connor) continue; 2059baef6bbfSMin Hu (Connor) } 206053324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 206153324971SDavid Marchand stats.opackets -= port->stats.opackets; 206253324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 206353324971SDavid Marchand stats.obytes -= port->stats.obytes; 206453324971SDavid Marchand stats.imissed -= port->stats.imissed; 206553324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 206653324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 206753324971SDavid Marchand 206853324971SDavid Marchand total_recv += stats.ipackets; 206953324971SDavid Marchand total_xmit += stats.opackets; 207053324971SDavid Marchand total_rx_dropped += stats.imissed; 207153324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 207253324971SDavid Marchand total_tx_dropped += stats.oerrors; 207353324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 207453324971SDavid Marchand 207553324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 207653324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 207753324971SDavid Marchand 207808dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 207908dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 208053324971SDavid Marchand stats.ipackets + stats.imissed); 208153324971SDavid Marchand 2082d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 208353324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 208453324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 208553324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 208653324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 208753324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 208853324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2089d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2090d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2091d139cf23SLance Richardson } 209253324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 209308dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 209408dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 209553324971SDavid Marchand } 209653324971SDavid Marchand 209708dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 209853324971SDavid Marchand "TX-total: %-"PRIu64"\n", 209953324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 210053324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 210153324971SDavid Marchand 21020e4b1963SDharmik Thakkar if (record_burst_stats) { 210353324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 210453324971SDavid Marchand pkt_burst_stats_display("RX", 210553324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 210653324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 210753324971SDavid Marchand pkt_burst_stats_display("TX", 210853324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 21090e4b1963SDharmik Thakkar } 211053324971SDavid Marchand 211153324971SDavid Marchand printf(" %s--------------------------------%s\n", 211253324971SDavid Marchand fwd_stats_border, fwd_stats_border); 
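/*
 * Note: the per-port counters printed above are deltas, not absolute
 * NIC counters: the values from rte_eth_stats_get() have the snapshot
 * saved by fwd_stats_reset() subtracted, so each run reports only the
 * traffic of the current forwarding test.
 */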
211353324971SDavid Marchand } 211453324971SDavid Marchand 211553324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 211653324971SDavid Marchand "%s\n", 211753324971SDavid Marchand acc_stats_border, acc_stats_border); 211853324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 211953324971SDavid Marchand "%-"PRIu64"\n" 212053324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 212153324971SDavid Marchand "%-"PRIu64"\n", 212253324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 212353324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 212453324971SDavid Marchand if (total_rx_nombuf > 0) 212553324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 212653324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 212753324971SDavid Marchand "%s\n", 212853324971SDavid Marchand acc_stats_border, acc_stats_border); 2129bc700b67SDharmik Thakkar if (record_core_cycles) { 21304c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21313a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21323a164e00SPhil Yang uint64_t total_pkts = 0; 21333a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21343a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21353a164e00SPhil Yang total_pkts = total_xmit; 21363a164e00SPhil Yang else 21373a164e00SPhil Yang total_pkts = total_recv; 21383a164e00SPhil Yang 21391920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 21403a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21414c0497b1SDharmik Thakkar " MHz Clock\n", 21423a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21433a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21444c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21453a164e00SPhil Yang } 2146bc700b67SDharmik Thakkar } 214753324971SDavid Marchand } 214853324971SDavid Marchand 214953324971SDavid Marchand void 215053324971SDavid Marchand fwd_stats_reset(void) 215153324971SDavid Marchand { 215253324971SDavid Marchand streamid_t sm_id; 215353324971SDavid Marchand portid_t pt_id; 2154baef6bbfSMin Hu (Connor) int ret; 215553324971SDavid Marchand int i; 215653324971SDavid Marchand 215753324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 215853324971SDavid Marchand pt_id = fwd_ports_ids[i]; 2159baef6bbfSMin Hu (Connor) ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats); 2160baef6bbfSMin Hu (Connor) if (ret != 0) 2161baef6bbfSMin Hu (Connor) fprintf(stderr, 2162baef6bbfSMin Hu (Connor) "%s: Error: failed to clear stats (port %u):%d", 2163baef6bbfSMin Hu (Connor) __func__, pt_id, ret); 216453324971SDavid Marchand } 216553324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 216653324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 216753324971SDavid Marchand 216853324971SDavid Marchand fs->rx_packets = 0; 216953324971SDavid Marchand fs->tx_packets = 0; 217053324971SDavid Marchand fs->fwd_dropped = 0; 217153324971SDavid Marchand fs->rx_bad_ip_csum = 0; 217253324971SDavid Marchand fs->rx_bad_l4_csum = 0; 217353324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2174d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 217553324971SDavid Marchand 217653324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 217753324971SDavid Marchand 
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 217853324971SDavid Marchand fs->core_cycles = 0; 217953324971SDavid Marchand } 218053324971SDavid Marchand } 218153324971SDavid Marchand 2182af75078fSIntel static void 21837741e4cfSIntel flush_fwd_rx_queues(void) 2184af75078fSIntel { 2185af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2186af75078fSIntel portid_t rxp; 21877741e4cfSIntel portid_t port_id; 2188af75078fSIntel queueid_t rxq; 2189af75078fSIntel uint16_t nb_rx; 2190af75078fSIntel uint16_t i; 2191af75078fSIntel uint8_t j; 2192f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 2193594302c7SJames Poole uint64_t timer_period; 2194f487715fSReshma Pattan 2195a550baf2SMin Hu (Connor) if (num_procs > 1) { 2196a550baf2SMin Hu (Connor) printf("multi-process does not support flushing fwd Rx queues, skipping.\n"); 2197a550baf2SMin Hu (Connor) return; 2198a550baf2SMin Hu (Connor) } 2199a550baf2SMin Hu (Connor) 2200f487715fSReshma Pattan /* convert to number of cycles */ 2201594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 2202af75078fSIntel 2203af75078fSIntel for (j = 0; j < 2; j++) { 22047741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 2205af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 22067741e4cfSIntel port_id = fwd_ports_ids[rxp]; 22073c4426dbSDmitry Kozlyuk 22083c4426dbSDmitry Kozlyuk /* Polling stopped queues is prohibited. */ 22093c4426dbSDmitry Kozlyuk if (ports[port_id].rxq[rxq].state == 22103c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED) 22113c4426dbSDmitry Kozlyuk continue; 22123c4426dbSDmitry Kozlyuk 2213f487715fSReshma Pattan /** 2214f487715fSReshma Pattan * testpmd can get stuck in the do-while loop below 2215f487715fSReshma Pattan * if rte_eth_rx_burst() always returns a nonzero 2216f487715fSReshma Pattan * packet count, so a timer is used to exit the loop 2217f487715fSReshma Pattan * after a 1 second timeout.
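 * (As an illustration, with a TSC-based timer running at an assumed
 * 2.0 GHz, rte_get_timer_hz() is about 2e9, so the loop stops once
 * timer_tsc has accumulated that many cycles, i.e. after roughly one
 * second even if packets keep arriving.)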
2218f487715fSReshma Pattan */ 2219f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 2220af75078fSIntel do { 22217741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 2222013af9b6SIntel pkts_burst, MAX_PKT_BURST); 2223af75078fSIntel for (i = 0; i < nb_rx; i++) 2224af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 2225f487715fSReshma Pattan 2226f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 2227f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 2228f487715fSReshma Pattan timer_tsc += diff_tsc; 2229f487715fSReshma Pattan } while ((nb_rx > 0) && 2230f487715fSReshma Pattan (timer_tsc < timer_period)); 2231f487715fSReshma Pattan timer_tsc = 0; 2232af75078fSIntel } 2233af75078fSIntel } 2234af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 2235af75078fSIntel } 2236af75078fSIntel } 2237af75078fSIntel 2238af75078fSIntel static void 2239af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 2240af75078fSIntel { 2241af75078fSIntel struct fwd_stream **fsm; 2242af75078fSIntel streamid_t nb_fs; 2243af75078fSIntel streamid_t sm_id; 2244a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 22457e4441c8SRemy Horton uint64_t tics_per_1sec; 22467e4441c8SRemy Horton uint64_t tics_datum; 22477e4441c8SRemy Horton uint64_t tics_current; 22484918a357SXiaoyun Li uint16_t i, cnt_ports; 2249af75078fSIntel 22504918a357SXiaoyun Li cnt_ports = nb_ports; 22517e4441c8SRemy Horton tics_datum = rte_rdtsc(); 22527e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 22537e4441c8SRemy Horton #endif 2254af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 2255af75078fSIntel nb_fs = fc->stream_nb; 2256af75078fSIntel do { 2257af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 22583c4426dbSDmitry Kozlyuk if (!fsm[sm_id]->disabled) 2259af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 2260a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS 2261e25e6c70SRemy Horton if (bitrate_enabled != 0 && 2262e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 22637e4441c8SRemy Horton tics_current = rte_rdtsc(); 22647e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) { 22657e4441c8SRemy Horton /* Periodic bitrate calculation */ 22664918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 2267e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 22684918a357SXiaoyun Li ports_ids[i]); 22697e4441c8SRemy Horton tics_datum = tics_current; 22707e4441c8SRemy Horton } 2271e25e6c70SRemy Horton } 22727e4441c8SRemy Horton #endif 2273a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS 227465eb1e54SPablo de Lara if (latencystats_enabled != 0 && 227565eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 227662d3216dSReshma Pattan rte_latencystats_update(); 227762d3216dSReshma Pattan #endif 227862d3216dSReshma Pattan 2279af75078fSIntel } while (! fc->stopped); 2280af75078fSIntel } 2281af75078fSIntel 2282af75078fSIntel static int 2283af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2284af75078fSIntel { 2285af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2286af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2287af75078fSIntel return 0; 2288af75078fSIntel } 2289af75078fSIntel 2290af75078fSIntel /* 2291af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2292af75078fSIntel * Used to start communication flows in network loopback test configurations. 
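 * The local copy of the lcore context is marked stopped before the
 * forwarding loop is entered, so run_pkt_fwd_on_lcore() performs
 * exactly one pass of the TXONLY engine and then returns.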
2293af75078fSIntel */ 2294af75078fSIntel static int 2295af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2296af75078fSIntel { 2297af75078fSIntel struct fwd_lcore *fwd_lc; 2298af75078fSIntel struct fwd_lcore tmp_lcore; 2299af75078fSIntel 2300af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2301af75078fSIntel tmp_lcore = *fwd_lc; 2302af75078fSIntel tmp_lcore.stopped = 1; 2303af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2304af75078fSIntel return 0; 2305af75078fSIntel } 2306af75078fSIntel 2307af75078fSIntel /* 2308af75078fSIntel * Launch packet forwarding: 2309af75078fSIntel * - Setup per-port forwarding context. 2310af75078fSIntel * - launch logical cores with their forwarding configuration. 2311af75078fSIntel */ 2312af75078fSIntel static void 2313af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2314af75078fSIntel { 2315af75078fSIntel unsigned int i; 2316af75078fSIntel unsigned int lc_id; 2317af75078fSIntel int diag; 2318af75078fSIntel 2319af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2320af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2321af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2322af75078fSIntel fwd_lcores[i]->stopped = 0; 2323af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2324af75078fSIntel fwd_lcores[i], lc_id); 2325af75078fSIntel if (diag != 0) 232661a3b0e5SAndrew Rybchenko fprintf(stderr, 232761a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2328af75078fSIntel lc_id, diag); 2329af75078fSIntel } 2330af75078fSIntel } 2331af75078fSIntel } 2332af75078fSIntel 2333af75078fSIntel /* 2334af75078fSIntel * Launch packet forwarding configuration. 2335af75078fSIntel */ 2336af75078fSIntel void 2337af75078fSIntel start_packet_forwarding(int with_tx_first) 2338af75078fSIntel { 2339af75078fSIntel port_fwd_begin_t port_fwd_begin; 2340af75078fSIntel port_fwd_end_t port_fwd_end; 23413c4426dbSDmitry Kozlyuk stream_init_t stream_init = cur_fwd_eng->stream_init; 2342af75078fSIntel unsigned int i; 2343af75078fSIntel 23445a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 23455a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 23465a8fb55cSReshma Pattan 23475a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 23485a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 23495a8fb55cSReshma Pattan 23505a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 23515a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 23525a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 23535a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 23545a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 23555a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 23565a8fb55cSReshma Pattan 2357ce8d5614SIntel if (all_ports_started() == 0) { 235861a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2359ce8d5614SIntel return; 2360ce8d5614SIntel } 2361af75078fSIntel if (test_done == 0) { 236261a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2363af75078fSIntel return; 2364af75078fSIntel } 23657741e4cfSIntel 236647a767b2SMatan Azrad fwd_config_setup(); 236747a767b2SMatan Azrad 236865744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 236965744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 237065744833SXueming Li return; 237165744833SXueming 
Li 23723c4426dbSDmitry Kozlyuk if (stream_init != NULL) 23733c4426dbSDmitry Kozlyuk for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) 23743c4426dbSDmitry Kozlyuk stream_init(fwd_streams[i]); 23753c4426dbSDmitry Kozlyuk 2376a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2377a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2378a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2379a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2380a78040c9SAlvin Zhang fprintf(stderr, 2381a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2382a78040c9SAlvin Zhang return; 2383a78040c9SAlvin Zhang } 2384a78040c9SAlvin Zhang } 2385a78040c9SAlvin Zhang } 2386a78040c9SAlvin Zhang 2387a78040c9SAlvin Zhang if (with_tx_first) { 2388a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2389a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2390a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2391a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2392a78040c9SAlvin Zhang fprintf(stderr, 2393a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2394a78040c9SAlvin Zhang return; 2395a78040c9SAlvin Zhang } 2396a78040c9SAlvin Zhang } 2397a78040c9SAlvin Zhang } 2398a78040c9SAlvin Zhang } 2399a78040c9SAlvin Zhang 2400a78040c9SAlvin Zhang test_done = 0; 2401a78040c9SAlvin Zhang 24027741e4cfSIntel if(!no_flush_rx) 24037741e4cfSIntel flush_fwd_rx_queues(); 24047741e4cfSIntel 2405af75078fSIntel rxtx_config_display(); 2406af75078fSIntel 240753324971SDavid Marchand fwd_stats_reset(); 2408af75078fSIntel if (with_tx_first) { 2409acbf77a6SZhihong Wang while (with_tx_first--) { 2410acbf77a6SZhihong Wang launch_packet_forwarding( 2411acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2412af75078fSIntel rte_eal_mp_wait_lcore(); 2413acbf77a6SZhihong Wang } 2414af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2415af75078fSIntel if (port_fwd_end != NULL) { 2416af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2417af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2418af75078fSIntel } 2419af75078fSIntel } 2420af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2421af75078fSIntel } 2422af75078fSIntel 2423af75078fSIntel void 2424af75078fSIntel stop_packet_forwarding(void) 2425af75078fSIntel { 2426af75078fSIntel port_fwd_end_t port_fwd_end; 2427af75078fSIntel lcoreid_t lc_id; 242853324971SDavid Marchand portid_t pt_id; 242953324971SDavid Marchand int i; 2430af75078fSIntel 2431af75078fSIntel if (test_done) { 243261a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2433af75078fSIntel return; 2434af75078fSIntel } 2435af75078fSIntel printf("Telling cores to stop..."); 2436af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2437af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2438af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2439af75078fSIntel rte_eal_mp_wait_lcore(); 2440af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2441af75078fSIntel if (port_fwd_end != NULL) { 2442af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2443af75078fSIntel pt_id = fwd_ports_ids[i]; 2444af75078fSIntel (*port_fwd_end)(pt_id); 2445af75078fSIntel } 2446af75078fSIntel } 2447c185d42cSDavid Marchand 244853324971SDavid Marchand fwd_stats_display(); 244958d475b7SJerin Jacob 2450af75078fSIntel printf("\nDone.\n"); 2451af75078fSIntel test_done = 1; 2452af75078fSIntel } 2453af75078fSIntel 2454cfae07fdSOuyang 
Changchun void
2455cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2456cfae07fdSOuyang Changchun {
2457492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0)
245861a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link up failed.\n");
2459cfae07fdSOuyang Changchun }
2460cfae07fdSOuyang Changchun
2461cfae07fdSOuyang Changchun void
2462cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2463cfae07fdSOuyang Changchun {
2464492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0)
246561a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link down failed.\n");
2466cfae07fdSOuyang Changchun }
2467cfae07fdSOuyang Changchun
2468ce8d5614SIntel static int
2469ce8d5614SIntel all_ports_started(void)
2470ce8d5614SIntel {
2471ce8d5614SIntel portid_t pi;
2472ce8d5614SIntel struct rte_port *port;
2473ce8d5614SIntel
24747d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) {
2475ce8d5614SIntel port = &ports[pi];
2476ce8d5614SIntel /* Check if there is a port which is not started */
247741b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) &&
247741b05095SBernard Iremonger (port->slave_flag == 0))
2479ce8d5614SIntel return 0;
2480ce8d5614SIntel }
2481ce8d5614SIntel
2482ce8d5614SIntel /* All ports are started */
2483ce8d5614SIntel return 1;
2484ce8d5614SIntel }
2485ce8d5614SIntel
2486148f963fSBruce Richardson int
24876018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
24886018eb8cSShahaf Shuler {
24896018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id];
24906018eb8cSShahaf Shuler
24916018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) &&
24926018eb8cSShahaf Shuler (port->slave_flag == 0))
24936018eb8cSShahaf Shuler return 0;
24946018eb8cSShahaf Shuler return 1;
24956018eb8cSShahaf Shuler }
24966018eb8cSShahaf Shuler
24976018eb8cSShahaf Shuler int
2498edab33b1STetsuya Mukawa all_ports_stopped(void)
2499edab33b1STetsuya Mukawa {
2500edab33b1STetsuya Mukawa portid_t pi;
2501edab33b1STetsuya Mukawa
25027d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) {
25036018eb8cSShahaf Shuler if (!port_is_stopped(pi))
2504edab33b1STetsuya Mukawa return 0;
2505edab33b1STetsuya Mukawa }
2506edab33b1STetsuya Mukawa
2507edab33b1STetsuya Mukawa return 1;
2508edab33b1STetsuya Mukawa }
2509edab33b1STetsuya Mukawa
2510edab33b1STetsuya Mukawa int
2511edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2512edab33b1STetsuya Mukawa {
2513edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN))
2514edab33b1STetsuya Mukawa return 0;
2515edab33b1STetsuya Mukawa
2516edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED)
2517edab33b1STetsuya Mukawa return 0;
2518edab33b1STetsuya Mukawa
2519edab33b1STetsuya Mukawa return 1;
2520edab33b1STetsuya Mukawa }
2521edab33b1STetsuya Mukawa
2522*23095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8)
2523*23095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9)
2524*23095155SDariusz Sosnowski
2525*23095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12)
2526*23095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13)
2527*23095155SDariusz Sosnowski
2528*23095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16)
2529*23095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17)
2530*23095155SDariusz Sosnowski
2531*23095155SDariusz Sosnowski
25321c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port.
*/ 25331c69df45SOri Kam static int 253401817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 25351c69df45SOri Kam { 25361c69df45SOri Kam queueid_t qi; 25371c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 25381c69df45SOri Kam .peer_count = 1, 25391c69df45SOri Kam }; 25401c69df45SOri Kam int i; 25411c69df45SOri Kam int diag; 25421c69df45SOri Kam struct rte_port *port = &ports[pi]; 254301817b10SBing Zhao uint16_t peer_rx_port = pi; 254401817b10SBing Zhao uint16_t peer_tx_port = pi; 254501817b10SBing Zhao uint32_t manual = 1; 254601817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 2547*23095155SDariusz Sosnowski uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY; 2548*23095155SDariusz Sosnowski uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY; 2549*23095155SDariusz Sosnowski uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY; 2550*23095155SDariusz Sosnowski uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY; 2551*23095155SDariusz Sosnowski uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY; 2552*23095155SDariusz Sosnowski uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY; 255301817b10SBing Zhao 255401817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 255501817b10SBing Zhao peer_rx_port = pi; 255601817b10SBing Zhao peer_tx_port = pi; 255701817b10SBing Zhao manual = 0; 255801817b10SBing Zhao } else if (hairpin_mode & 0x1) { 255901817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 256001817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 256101817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 256201817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 256301817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 256401817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 256501817b10SBing Zhao peer_rx_port = p_pi; 256601817b10SBing Zhao } else { 256701817b10SBing Zhao uint16_t next_pi; 256801817b10SBing Zhao 256901817b10SBing Zhao /* Last port will be the peer RX port of the first. 
*/ 257001817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi)
257101817b10SBing Zhao peer_rx_port = next_pi;
257201817b10SBing Zhao }
257301817b10SBing Zhao manual = 1;
257401817b10SBing Zhao } else if (hairpin_mode & 0x2) {
257501817b10SBing Zhao if (cnt_pi & 0x1) {
257601817b10SBing Zhao peer_rx_port = p_pi;
257701817b10SBing Zhao } else {
257801817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
257901817b10SBing Zhao RTE_ETH_DEV_NO_OWNER);
258001817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS)
258101817b10SBing Zhao peer_rx_port = pi;
258201817b10SBing Zhao }
258301817b10SBing Zhao peer_tx_port = peer_rx_port;
258401817b10SBing Zhao manual = 1;
258501817b10SBing Zhao }
25861c69df45SOri Kam
25871c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
258801817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port;
25891c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq;
259001817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
259101817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
2592*23095155SDariusz Sosnowski hairpin_conf.force_memory = !!tx_force_memory;
2593*23095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
2594*23095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!tx_rte_memory;
25951c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup
25961c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf);
25971c69df45SOri Kam i++;
25981c69df45SOri Kam if (diag == 0)
25991c69df45SOri Kam continue;
26001c69df45SOri Kam
26011c69df45SOri Kam /* Failed to set up Tx hairpin queue, return */
2602eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
2603eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
2604eac341d3SJoyce Kong else
260561a3b0e5SAndrew Rybchenko fprintf(stderr,
260661a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi);
260761a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to configure port %d hairpin queues\n",
260861a3b0e5SAndrew Rybchenko pi);
26091c69df45SOri Kam /* try to reconfigure queues next time */
26101c69df45SOri Kam port->need_reconfig_queues = 1;
26111c69df45SOri Kam return -1;
26121c69df45SOri Kam }
26131c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
261401817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port;
26151c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq;
261601817b10SBing Zhao hairpin_conf.manual_bind = !!manual;
261701817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp;
2618*23095155SDariusz Sosnowski hairpin_conf.force_memory = !!rx_force_memory;
2619*23095155SDariusz Sosnowski hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
2620*23095155SDariusz Sosnowski hairpin_conf.use_rte_memory = !!rx_rte_memory;
26211c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup
26221c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf);
26231c69df45SOri Kam i++;
26241c69df45SOri Kam if (diag == 0)
26251c69df45SOri Kam continue;
26261c69df45SOri Kam
26271c69df45SOri Kam /* Failed to set up Rx hairpin queue, return */
2628eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
2629eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
2630eac341d3SJoyce Kong else
263161a3b0e5SAndrew Rybchenko fprintf(stderr,
263261a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi);
263361a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to configure port %d hairpin queues\n",
263461a3b0e5SAndrew Rybchenko pi);
26351c69df45SOri Kam /* try to reconfigure queues next time */
26361c69df45SOri Kam port->need_reconfig_queues =
1; 26371c69df45SOri Kam return -1; 26381c69df45SOri Kam } 26391c69df45SOri Kam return 0; 26401c69df45SOri Kam } 26411c69df45SOri Kam 26422befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 26432befc67fSViacheslav Ovsiienko int 26442befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 26452befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 26462befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 26472befc67fSViacheslav Ovsiienko { 26482befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 26492befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 26502befc67fSViacheslav Ovsiienko int ret; 26512befc67fSViacheslav Ovsiienko 26522befc67fSViacheslav Ovsiienko if (rx_pkt_nb_segs <= 1 || 26532befc67fSViacheslav Ovsiienko (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { 26542befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26552befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26562befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, 26572befc67fSViacheslav Ovsiienko nb_rx_desc, socket_id, 26582befc67fSViacheslav Ovsiienko rx_conf, mp); 26593c4426dbSDmitry Kozlyuk goto exit; 26602befc67fSViacheslav Ovsiienko } 26612befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 26622befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 26632befc67fSViacheslav Ovsiienko struct rte_mempool *mpx; 26642befc67fSViacheslav Ovsiienko /* 26652befc67fSViacheslav Ovsiienko * Use last valid pool for the segments with number 26662befc67fSViacheslav Ovsiienko * exceeding the pool index. 26672befc67fSViacheslav Ovsiienko */ 26681108c33eSRaja Zidane mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 26692befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 26702befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 26712befc67fSViacheslav Ovsiienko rx_seg->length = rx_pkt_seg_lengths[i] ? 26722befc67fSViacheslav Ovsiienko rx_pkt_seg_lengths[i] : 26732befc67fSViacheslav Ovsiienko mbuf_data_size[mp_n]; 26742befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 26752befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 26762befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ? mpx : mp; 26772befc67fSViacheslav Ovsiienko } 26782befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 26792befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 26802befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 26812befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 26822befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26832befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26843c4426dbSDmitry Kozlyuk exit: 26853c4426dbSDmitry Kozlyuk ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ? 
26863c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 26873c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 26882befc67fSViacheslav Ovsiienko return ret; 26892befc67fSViacheslav Ovsiienko } 26902befc67fSViacheslav Ovsiienko 269163b72657SIvan Ilchenko static int 269263b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 269363b72657SIvan Ilchenko { 269463b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 269563b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 269663b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 269763b72657SIvan Ilchenko 269863b72657SIvan Ilchenko if (xstats_display_num == 0) 269963b72657SIvan Ilchenko return 0; 270063b72657SIvan Ilchenko 270163b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 270263b72657SIvan Ilchenko if (*ids_supp == NULL) 270363b72657SIvan Ilchenko goto fail_ids_supp; 270463b72657SIvan Ilchenko 270563b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 270663b72657SIvan Ilchenko sizeof(**prev_values)); 270763b72657SIvan Ilchenko if (*prev_values == NULL) 270863b72657SIvan Ilchenko goto fail_prev_values; 270963b72657SIvan Ilchenko 271063b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 271163b72657SIvan Ilchenko sizeof(**curr_values)); 271263b72657SIvan Ilchenko if (*curr_values == NULL) 271363b72657SIvan Ilchenko goto fail_curr_values; 271463b72657SIvan Ilchenko 271563b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 271663b72657SIvan Ilchenko 271763b72657SIvan Ilchenko return 0; 271863b72657SIvan Ilchenko 271963b72657SIvan Ilchenko fail_curr_values: 272063b72657SIvan Ilchenko free(*prev_values); 272163b72657SIvan Ilchenko fail_prev_values: 272263b72657SIvan Ilchenko free(*ids_supp); 272363b72657SIvan Ilchenko fail_ids_supp: 272463b72657SIvan Ilchenko return -ENOMEM; 272563b72657SIvan Ilchenko } 272663b72657SIvan Ilchenko 272763b72657SIvan Ilchenko static void 272863b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 272963b72657SIvan Ilchenko { 273063b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 273163b72657SIvan Ilchenko return; 273263b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 273363b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 273463b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 273563b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 273663b72657SIvan Ilchenko } 273763b72657SIvan Ilchenko 273863b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
*/ 273963b72657SIvan Ilchenko static void
274063b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
274163b72657SIvan Ilchenko {
274263b72657SIvan Ilchenko unsigned int stat, stat_supp;
274363b72657SIvan Ilchenko const char *xstat_name;
274463b72657SIvan Ilchenko struct rte_port *port;
274563b72657SIvan Ilchenko uint64_t *ids_supp;
274663b72657SIvan Ilchenko int rc;
274763b72657SIvan Ilchenko
274863b72657SIvan Ilchenko if (xstats_display_num == 0)
274963b72657SIvan Ilchenko return;
275063b72657SIvan Ilchenko
275163b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) {
275263b72657SIvan Ilchenko fill_xstats_display_info();
275363b72657SIvan Ilchenko return;
275463b72657SIvan Ilchenko }
275563b72657SIvan Ilchenko
275663b72657SIvan Ilchenko port = &ports[pi];
275763b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED)
275863b72657SIvan Ilchenko return;
275963b72657SIvan Ilchenko
276063b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
276163b72657SIvan Ilchenko rte_exit(EXIT_FAILURE,
276263b72657SIvan Ilchenko "Failed to allocate xstats display memory\n");
276363b72657SIvan Ilchenko
276463b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp;
276563b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
276663b72657SIvan Ilchenko xstat_name = xstats_display[stat].name;
276763b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
276863b72657SIvan Ilchenko ids_supp + stat_supp);
276963b72657SIvan Ilchenko if (rc != 0) {
277063b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skipping (index %u)\n",
277163b72657SIvan Ilchenko xstat_name, pi, stat);
277263b72657SIvan Ilchenko continue;
277363b72657SIvan Ilchenko }
277463b72657SIvan Ilchenko stat_supp++;
277563b72657SIvan Ilchenko }
277663b72657SIvan Ilchenko
277763b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp;
277863b72657SIvan Ilchenko }
277963b72657SIvan Ilchenko
278063b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics.
*/ 278163b72657SIvan Ilchenko static void 278263b72657SIvan Ilchenko fill_xstats_display_info(void) 278363b72657SIvan Ilchenko { 278463b72657SIvan Ilchenko portid_t pi; 278563b72657SIvan Ilchenko 278663b72657SIvan Ilchenko if (xstats_display_num == 0) 278763b72657SIvan Ilchenko return; 278863b72657SIvan Ilchenko 278963b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 279063b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 279163b72657SIvan Ilchenko } 279263b72657SIvan Ilchenko 2793edab33b1STetsuya Mukawa int 2794ce8d5614SIntel start_port(portid_t pid) 2795ce8d5614SIntel { 279692d2703eSMichael Qiu int diag, need_check_link_status = -1; 2797ce8d5614SIntel portid_t pi; 279801817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 279901817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 280001817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 280101817b10SBing Zhao uint16_t cnt_pi = 0; 280201817b10SBing Zhao uint16_t cfg_pi = 0; 280301817b10SBing Zhao int peer_pi; 2804ce8d5614SIntel queueid_t qi; 2805ce8d5614SIntel struct rte_port *port; 28061c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2807ce8d5614SIntel 28084468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 28094468635fSMichael Qiu return 0; 28104468635fSMichael Qiu 28117d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2812edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2813ce8d5614SIntel continue; 2814ce8d5614SIntel 2815d8c079a5SMin Hu (Connor) if (port_is_bonding_slave(pi)) { 2816d8c079a5SMin Hu (Connor) fprintf(stderr, 2817d8c079a5SMin Hu (Connor) "Please remove port %d from bonded device.\n", 2818d8c079a5SMin Hu (Connor) pi); 2819d8c079a5SMin Hu (Connor) continue; 2820d8c079a5SMin Hu (Connor) } 2821d8c079a5SMin Hu (Connor) 282292d2703eSMichael Qiu need_check_link_status = 0; 2823ce8d5614SIntel port = &ports[pi]; 2824eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STOPPED) 2825eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 2826eac341d3SJoyce Kong else { 282761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2828ce8d5614SIntel continue; 2829ce8d5614SIntel } 2830ce8d5614SIntel 2831ce8d5614SIntel if (port->need_reconfig > 0) { 2832655eae01SJie Wang struct rte_eth_conf dev_conf; 2833655eae01SJie Wang int k; 2834655eae01SJie Wang 2835ce8d5614SIntel port->need_reconfig = 0; 2836ce8d5614SIntel 28377ee3e944SVasily Philipov if (flow_isolate_all) { 28387ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 28397ee3e944SVasily Philipov if (ret) { 284061a3b0e5SAndrew Rybchenko fprintf(stderr, 284161a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 284261a3b0e5SAndrew Rybchenko pi); 28437ee3e944SVasily Philipov return -1; 28447ee3e944SVasily Philipov } 28457ee3e944SVasily Philipov } 2846b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 28475706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 284820a0286fSLiu Xiaofeng port->socket_id); 28491c69df45SOri Kam if (nb_hairpinq > 0 && 28501c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 285161a3b0e5SAndrew Rybchenko fprintf(stderr, 285261a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 285361a3b0e5SAndrew Rybchenko pi); 28541c69df45SOri Kam return -1; 28551c69df45SOri Kam } 28561bb4a528SFerruh Yigit 2857ce8d5614SIntel /* configure port */ 2858a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 28591c69df45SOri Kam nb_txq + nb_hairpinq, 2860ce8d5614SIntel &(port->dev_conf)); 2861ce8d5614SIntel if (diag != 0) { 
2862eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2863eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2864eac341d3SJoyce Kong else 286561a3b0e5SAndrew Rybchenko fprintf(stderr, 286661a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 286761a3b0e5SAndrew Rybchenko pi); 286861a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 286961a3b0e5SAndrew Rybchenko pi); 2870ce8d5614SIntel /* try to reconfigure port next time */ 2871ce8d5614SIntel port->need_reconfig = 1; 2872148f963fSBruce Richardson return -1; 2873ce8d5614SIntel } 2874655eae01SJie Wang /* get device configuration*/ 2875655eae01SJie Wang if (0 != 2876655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 2877655eae01SJie Wang fprintf(stderr, 2878655eae01SJie Wang "port %d can not get device configuration\n", 2879655eae01SJie Wang pi); 2880655eae01SJie Wang return -1; 2881655eae01SJie Wang } 2882655eae01SJie Wang /* Apply Rx offloads configuration */ 2883655eae01SJie Wang if (dev_conf.rxmode.offloads != 2884655eae01SJie Wang port->dev_conf.rxmode.offloads) { 2885655eae01SJie Wang port->dev_conf.rxmode.offloads |= 2886655eae01SJie Wang dev_conf.rxmode.offloads; 2887655eae01SJie Wang for (k = 0; 2888655eae01SJie Wang k < port->dev_info.max_rx_queues; 2889655eae01SJie Wang k++) 28903c4426dbSDmitry Kozlyuk port->rxq[k].conf.offloads |= 2891655eae01SJie Wang dev_conf.rxmode.offloads; 2892655eae01SJie Wang } 2893655eae01SJie Wang /* Apply Tx offloads configuration */ 2894655eae01SJie Wang if (dev_conf.txmode.offloads != 2895655eae01SJie Wang port->dev_conf.txmode.offloads) { 2896655eae01SJie Wang port->dev_conf.txmode.offloads |= 2897655eae01SJie Wang dev_conf.txmode.offloads; 2898655eae01SJie Wang for (k = 0; 2899655eae01SJie Wang k < port->dev_info.max_tx_queues; 2900655eae01SJie Wang k++) 29013c4426dbSDmitry Kozlyuk port->txq[k].conf.offloads |= 2902655eae01SJie Wang dev_conf.txmode.offloads; 2903655eae01SJie Wang } 2904ce8d5614SIntel } 2905a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 2906ce8d5614SIntel port->need_reconfig_queues = 0; 2907ce8d5614SIntel /* setup tx queues */ 2908ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 29093c4426dbSDmitry Kozlyuk struct rte_eth_txconf *conf = 29103c4426dbSDmitry Kozlyuk &port->txq[qi].conf; 29113c4426dbSDmitry Kozlyuk 2912b6ea6408SIntel if ((numa_support) && 2913b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2914b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2915d44f8a48SQi Zhang port->nb_tx_desc[qi], 2916d44f8a48SQi Zhang txring_numa[pi], 29173c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2918b6ea6408SIntel else 2919b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2920d44f8a48SQi Zhang port->nb_tx_desc[qi], 2921d44f8a48SQi Zhang port->socket_id, 29223c4426dbSDmitry Kozlyuk &(port->txq[qi].conf)); 2923b6ea6408SIntel 29243c4426dbSDmitry Kozlyuk if (diag == 0) { 29253c4426dbSDmitry Kozlyuk port->txq[qi].state = 29263c4426dbSDmitry Kozlyuk conf->tx_deferred_start ? 
29273c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STOPPED : 29283c4426dbSDmitry Kozlyuk RTE_ETH_QUEUE_STATE_STARTED; 2929ce8d5614SIntel continue; 29303c4426dbSDmitry Kozlyuk } 2931ce8d5614SIntel 2932ce8d5614SIntel /* Fail to setup tx queue, return */ 2933eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2934eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2935eac341d3SJoyce Kong else 293661a3b0e5SAndrew Rybchenko fprintf(stderr, 293761a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 293861a3b0e5SAndrew Rybchenko pi); 293961a3b0e5SAndrew Rybchenko fprintf(stderr, 294061a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 2941d44f8a48SQi Zhang pi); 2942ce8d5614SIntel /* try to reconfigure queues next time */ 2943ce8d5614SIntel port->need_reconfig_queues = 1; 2944148f963fSBruce Richardson return -1; 2945ce8d5614SIntel } 2946ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2947d44f8a48SQi Zhang /* setup rx queues */ 2948b6ea6408SIntel if ((numa_support) && 2949b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2950b6ea6408SIntel struct rte_mempool * mp = 295126cbb419SViacheslav Ovsiienko mbuf_pool_find 295226cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2953b6ea6408SIntel if (mp == NULL) { 295461a3b0e5SAndrew Rybchenko fprintf(stderr, 295561a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 2956b6ea6408SIntel rxring_numa[pi]); 2957148f963fSBruce Richardson return -1; 2958b6ea6408SIntel } 2959b6ea6408SIntel 29602befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2961d4930794SFerruh Yigit port->nb_rx_desc[qi], 2962d44f8a48SQi Zhang rxring_numa[pi], 29633c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2964d44f8a48SQi Zhang mp); 29651e1d6bddSBernard Iremonger } else { 29661e1d6bddSBernard Iremonger struct rte_mempool *mp = 296726cbb419SViacheslav Ovsiienko mbuf_pool_find 296826cbb419SViacheslav Ovsiienko (port->socket_id, 0); 29691e1d6bddSBernard Iremonger if (mp == NULL) { 297061a3b0e5SAndrew Rybchenko fprintf(stderr, 297161a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 29721e1d6bddSBernard Iremonger port->socket_id); 29731e1d6bddSBernard Iremonger return -1; 2974b6ea6408SIntel } 29752befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2976d4930794SFerruh Yigit port->nb_rx_desc[qi], 2977d44f8a48SQi Zhang port->socket_id, 29783c4426dbSDmitry Kozlyuk &(port->rxq[qi].conf), 2979d44f8a48SQi Zhang mp); 29801e1d6bddSBernard Iremonger } 2981ce8d5614SIntel if (diag == 0) 2982ce8d5614SIntel continue; 2983ce8d5614SIntel 2984ce8d5614SIntel /* Fail to setup rx queue, return */ 2985eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2986eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2987eac341d3SJoyce Kong else 298861a3b0e5SAndrew Rybchenko fprintf(stderr, 298961a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 299061a3b0e5SAndrew Rybchenko pi); 299161a3b0e5SAndrew Rybchenko fprintf(stderr, 299261a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 2993d44f8a48SQi Zhang pi); 2994ce8d5614SIntel /* try to reconfigure queues next time */ 2995ce8d5614SIntel port->need_reconfig_queues = 1; 2996148f963fSBruce Richardson return -1; 2997ce8d5614SIntel } 29981c69df45SOri Kam /* setup hairpin queues */ 299901817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 30001c69df45SOri Kam return -1; 3001ce8d5614SIntel } 3002b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 
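/*
 * Editor's note - an illustrative sketch, not part of the original testpmd
 * source: the reconfiguration logic above is the full-featured form of the
 * minimal ethdev bring-up sequence below. The helper name
 * example_port_init(), the single queue pair, and the 512-descriptor rings
 * are assumptions made for this example; testpmd derives all of these from
 * its runtime configuration.
 *
 *	static int
 *	example_port_init(uint16_t port_id, struct rte_mempool *mp)
 *	{
 *		struct rte_eth_conf conf;
 *		int socket_id = rte_eth_dev_socket_id(port_id);
 *
 *		// a negative value means the NUMA node is unknown
 *		if (socket_id < 0)
 *			socket_id = SOCKET_ID_ANY;
 *		memset(&conf, 0, sizeof(conf));
 *		// 1 Rx + 1 Tx queue with default device settings
 *		if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *			return -1;
 *		// one Rx ring fed from mp, one Tx ring, same socket
 *		if (rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
 *				NULL, mp) < 0)
 *			return -1;
 *		if (rte_eth_tx_queue_setup(port_id, 0, 512, socket_id,
 *				NULL) < 0)
 *			return -1;
 *		return rte_eth_dev_start(port_id);
 *	}
 */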
3003b0a9354aSPavan Nikhilesh if (clear_ptypes) {
3004b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3005b0a9354aSPavan Nikhilesh NULL, 0);
3006b0a9354aSPavan Nikhilesh if (diag < 0)
300761a3b0e5SAndrew Rybchenko fprintf(stderr,
3008b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n",
3009b0a9354aSPavan Nikhilesh pi);
3010b0a9354aSPavan Nikhilesh }
3011b0a9354aSPavan Nikhilesh
301201817b10SBing Zhao p_pi = pi;
301301817b10SBing Zhao cnt_pi++;
301401817b10SBing Zhao
3015ce8d5614SIntel /* start port */
3016a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi);
301752f2c6f2SAndrew Rybchenko if (diag < 0) {
301861a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to start port %d: %s\n",
301961a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag));
3020ce8d5614SIntel
3021ce8d5614SIntel /* Failed to start the port, set it back to stopped */
3022eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
3023eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED;
3024eac341d3SJoyce Kong else
302561a3b0e5SAndrew Rybchenko fprintf(stderr,
302661a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n",
302761a3b0e5SAndrew Rybchenko pi);
3028ce8d5614SIntel continue;
3029ce8d5614SIntel }
3030ce8d5614SIntel
3031eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING)
3032eac341d3SJoyce Kong port->port_status = RTE_PORT_STARTED;
3033eac341d3SJoyce Kong else
303461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d cannot be set to started\n",
303561a3b0e5SAndrew Rybchenko pi);
3036ce8d5614SIntel
30375ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3038c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3039a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3040d8c89163SZijie Pan
3041ce8d5614SIntel /* at least one port started, need checking link status */
3042ce8d5614SIntel need_check_link_status = 1;
304301817b10SBing Zhao
304401817b10SBing Zhao pl[cfg_pi++] = pi;
3045ce8d5614SIntel }
3046ce8d5614SIntel
304792d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check)
3048edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL);
304992d2703eSMichael Qiu else if (need_check_link_status == 0)
305061a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n");
3051ce8d5614SIntel
305201817b10SBing Zhao if (hairpin_mode & 0xf) {
305301817b10SBing Zhao uint16_t i;
305401817b10SBing Zhao int j;
305501817b10SBing Zhao
305601817b10SBing Zhao /* bind all started hairpin ports */
305701817b10SBing Zhao for (i = 0; i < cfg_pi; i++) {
305801817b10SBing Zhao pi = pl[i];
305901817b10SBing Zhao /* bind current Tx to all peer Rx */
306001817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
306101817b10SBing Zhao RTE_MAX_ETHPORTS, 1);
306201817b10SBing Zhao if (peer_pi < 0)
306301817b10SBing Zhao return peer_pi;
306401817b10SBing Zhao for (j = 0; j < peer_pi; j++) {
306501817b10SBing Zhao if (!port_is_started(peer_pl[j]))
306601817b10SBing Zhao continue;
306701817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
306801817b10SBing Zhao if (diag < 0) {
306961a3b0e5SAndrew Rybchenko fprintf(stderr,
307061a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n",
307101817b10SBing Zhao pi, peer_pl[j],
307201817b10SBing Zhao rte_strerror(-diag));
307301817b10SBing Zhao return -1;
307401817b10SBing Zhao }
307501817b10SBing Zhao }
307601817b10SBing Zhao /* bind all peer Tx to current Rx */
307701817b10SBing Zhao peer_pi =
rte_eth_hairpin_get_peer_ports(pi, peer_pl, 307801817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 307901817b10SBing Zhao if (peer_pi < 0) 308001817b10SBing Zhao return peer_pi; 308101817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 308201817b10SBing Zhao if (!port_is_started(peer_pl[j])) 308301817b10SBing Zhao continue; 308401817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 308501817b10SBing Zhao if (diag < 0) { 308661a3b0e5SAndrew Rybchenko fprintf(stderr, 308761a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 308801817b10SBing Zhao peer_pl[j], pi, 308901817b10SBing Zhao rte_strerror(-diag)); 309001817b10SBing Zhao return -1; 309101817b10SBing Zhao } 309201817b10SBing Zhao } 309301817b10SBing Zhao } 309401817b10SBing Zhao } 309501817b10SBing Zhao 309663b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 309763b72657SIvan Ilchenko 3098ce8d5614SIntel printf("Done\n"); 3099148f963fSBruce Richardson return 0; 3100ce8d5614SIntel } 3101ce8d5614SIntel 3102ce8d5614SIntel void 3103ce8d5614SIntel stop_port(portid_t pid) 3104ce8d5614SIntel { 3105ce8d5614SIntel portid_t pi; 3106ce8d5614SIntel struct rte_port *port; 3107ce8d5614SIntel int need_check_link_status = 0; 310801817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 310901817b10SBing Zhao int peer_pi; 3110ce8d5614SIntel 31114468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 31124468635fSMichael Qiu return; 31134468635fSMichael Qiu 3114ce8d5614SIntel printf("Stopping ports...\n"); 3115ce8d5614SIntel 31167d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 31174468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3118ce8d5614SIntel continue; 3119ce8d5614SIntel 3120a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 312161a3b0e5SAndrew Rybchenko fprintf(stderr, 312261a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 312361a3b0e5SAndrew Rybchenko pi); 3124a8ef3e3aSBernard Iremonger continue; 3125a8ef3e3aSBernard Iremonger } 3126a8ef3e3aSBernard Iremonger 31270e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 312861a3b0e5SAndrew Rybchenko fprintf(stderr, 312961a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 313061a3b0e5SAndrew Rybchenko pi); 31310e545d30SBernard Iremonger continue; 31320e545d30SBernard Iremonger } 31330e545d30SBernard Iremonger 3134ce8d5614SIntel port = &ports[pi]; 3135eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3136eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3137eac341d3SJoyce Kong else 3138ce8d5614SIntel continue; 3139ce8d5614SIntel 314001817b10SBing Zhao if (hairpin_mode & 0xf) { 314101817b10SBing Zhao int j; 314201817b10SBing Zhao 314301817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 314401817b10SBing Zhao /* unbind all peer Tx from current Rx */ 314501817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 314601817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 314701817b10SBing Zhao if (peer_pi < 0) 314801817b10SBing Zhao continue; 314901817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 315001817b10SBing Zhao if (!port_is_started(peer_pl[j])) 315101817b10SBing Zhao continue; 315201817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 315301817b10SBing Zhao } 315401817b10SBing Zhao } 315501817b10SBing Zhao 31560f93edbfSGregory Etelson if (port->flow_list) 31570f93edbfSGregory Etelson port_flow_flush(pi); 31580f93edbfSGregory Etelson 3159a550baf2SMin Hu (Connor) if (eth_dev_stop_mp(pi) != 0) 3160e62c5a12SIvan Ilchenko 
RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3161e62c5a12SIvan Ilchenko pi); 3162ce8d5614SIntel 3163eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3164eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3165eac341d3SJoyce Kong else 316661a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 316761a3b0e5SAndrew Rybchenko pi); 3168ce8d5614SIntel need_check_link_status = 1; 3169ce8d5614SIntel } 3170bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3171edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3172ce8d5614SIntel 3173ce8d5614SIntel printf("Done\n"); 3174ce8d5614SIntel } 3175ce8d5614SIntel 3176ce6959bfSWisam Jaddo static void 31774f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3178ce6959bfSWisam Jaddo { 31794f1de450SThomas Monjalon portid_t i; 31804f1de450SThomas Monjalon portid_t new_total = 0; 3181ce6959bfSWisam Jaddo 31824f1de450SThomas Monjalon for (i = 0; i < *total; i++) 31834f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 31844f1de450SThomas Monjalon array[new_total] = array[i]; 31854f1de450SThomas Monjalon new_total++; 3186ce6959bfSWisam Jaddo } 31874f1de450SThomas Monjalon *total = new_total; 31884f1de450SThomas Monjalon } 31894f1de450SThomas Monjalon 31904f1de450SThomas Monjalon static void 31914f1de450SThomas Monjalon remove_invalid_ports(void) 31924f1de450SThomas Monjalon { 31934f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 31944f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 31954f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3196ce6959bfSWisam Jaddo } 3197ce6959bfSWisam Jaddo 31983889a322SHuisong Li static void 31994b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi) 32004b27989dSDmitry Kozlyuk { 32014b27989dSDmitry Kozlyuk mcast_addr_pool_destroy(pi); 32024b27989dSDmitry Kozlyuk port_flow_flush(pi); 32034b27989dSDmitry Kozlyuk port_flex_item_flush(pi); 32044b27989dSDmitry Kozlyuk port_action_handle_flush(pi); 32054b27989dSDmitry Kozlyuk } 32064b27989dSDmitry Kozlyuk 32074b27989dSDmitry Kozlyuk static void 32083889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves) 32093889a322SHuisong Li { 32103889a322SHuisong Li struct rte_port *port; 32113889a322SHuisong Li portid_t slave_pid; 32123889a322SHuisong Li uint16_t i; 32133889a322SHuisong Li 32143889a322SHuisong Li for (i = 0; i < num_slaves; i++) { 32153889a322SHuisong Li slave_pid = slave_pids[i]; 32163889a322SHuisong Li if (port_is_started(slave_pid) == 1) { 32173889a322SHuisong Li if (rte_eth_dev_stop(slave_pid) != 0) 32183889a322SHuisong Li fprintf(stderr, "rte_eth_dev_stop failed for port %u\n", 32193889a322SHuisong Li slave_pid); 32203889a322SHuisong Li 32213889a322SHuisong Li port = &ports[slave_pid]; 32223889a322SHuisong Li port->port_status = RTE_PORT_STOPPED; 32233889a322SHuisong Li } 32243889a322SHuisong Li 32253889a322SHuisong Li clear_port_slave_flag(slave_pid); 32263889a322SHuisong Li 32273889a322SHuisong Li /* Close slave device when testpmd quit or is killed. 
*/ 32283889a322SHuisong Li if (cl_quit == 1 || f_quit == 1) 32293889a322SHuisong Li rte_eth_dev_close(slave_pid); 32303889a322SHuisong Li } 32313889a322SHuisong Li } 32323889a322SHuisong Li 3233ce8d5614SIntel void 3234ce8d5614SIntel close_port(portid_t pid) 3235ce8d5614SIntel { 3236ce8d5614SIntel portid_t pi; 3237ce8d5614SIntel struct rte_port *port; 32383889a322SHuisong Li portid_t slave_pids[RTE_MAX_ETHPORTS]; 32393889a322SHuisong Li int num_slaves = 0; 3240ce8d5614SIntel 32414468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 32424468635fSMichael Qiu return; 32434468635fSMichael Qiu 3244ce8d5614SIntel printf("Closing ports...\n"); 3245ce8d5614SIntel 32467d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 32474468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3248ce8d5614SIntel continue; 3249ce8d5614SIntel 3250a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 325161a3b0e5SAndrew Rybchenko fprintf(stderr, 325261a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 325361a3b0e5SAndrew Rybchenko pi); 3254a8ef3e3aSBernard Iremonger continue; 3255a8ef3e3aSBernard Iremonger } 3256a8ef3e3aSBernard Iremonger 32570e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 325861a3b0e5SAndrew Rybchenko fprintf(stderr, 325961a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 326061a3b0e5SAndrew Rybchenko pi); 32610e545d30SBernard Iremonger continue; 32620e545d30SBernard Iremonger } 32630e545d30SBernard Iremonger 3264ce8d5614SIntel port = &ports[pi]; 3265eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 326661a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3267d4e8ad64SMichael Qiu continue; 3268d4e8ad64SMichael Qiu } 3269d4e8ad64SMichael Qiu 3270a550baf2SMin Hu (Connor) if (is_proc_primary()) { 32714b27989dSDmitry Kozlyuk flush_port_owned_resources(pi); 32723889a322SHuisong Li #ifdef RTE_NET_BOND 32733889a322SHuisong Li if (port->bond_flag == 1) 32743889a322SHuisong Li num_slaves = rte_eth_bond_slaves_get(pi, 32753889a322SHuisong Li slave_pids, RTE_MAX_ETHPORTS); 32763889a322SHuisong Li #endif 3277ce8d5614SIntel rte_eth_dev_close(pi); 32783889a322SHuisong Li /* 32793889a322SHuisong Li * If this port is bonded device, all slaves under the 32803889a322SHuisong Li * device need to be removed or closed. 
32813889a322SHuisong Li */ 32823889a322SHuisong Li if (port->bond_flag == 1 && num_slaves > 0) 32833889a322SHuisong Li clear_bonding_slave_device(slave_pids, 32843889a322SHuisong Li num_slaves); 3285ce8d5614SIntel } 328663b72657SIvan Ilchenko 328763b72657SIvan Ilchenko free_xstats_display_info(pi); 3288a550baf2SMin Hu (Connor) } 3289ce8d5614SIntel 329085c6571cSThomas Monjalon remove_invalid_ports(); 3291ce8d5614SIntel printf("Done\n"); 3292ce8d5614SIntel } 3293ce8d5614SIntel 3294edab33b1STetsuya Mukawa void 329597f1e196SWei Dai reset_port(portid_t pid) 329697f1e196SWei Dai { 329797f1e196SWei Dai int diag; 329897f1e196SWei Dai portid_t pi; 329997f1e196SWei Dai struct rte_port *port; 330097f1e196SWei Dai 330197f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 330297f1e196SWei Dai return; 330397f1e196SWei Dai 33041cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 33051cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 330661a3b0e5SAndrew Rybchenko fprintf(stderr, 330761a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 33081cde1b9aSShougang Wang return; 33091cde1b9aSShougang Wang } 33101cde1b9aSShougang Wang 331197f1e196SWei Dai printf("Resetting ports...\n"); 331297f1e196SWei Dai 331397f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 331497f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 331597f1e196SWei Dai continue; 331697f1e196SWei Dai 331797f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 331861a3b0e5SAndrew Rybchenko fprintf(stderr, 331961a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 332061a3b0e5SAndrew Rybchenko pi); 332197f1e196SWei Dai continue; 332297f1e196SWei Dai } 332397f1e196SWei Dai 332497f1e196SWei Dai if (port_is_bonding_slave(pi)) { 332561a3b0e5SAndrew Rybchenko fprintf(stderr, 332661a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 332797f1e196SWei Dai pi); 332897f1e196SWei Dai continue; 332997f1e196SWei Dai } 333097f1e196SWei Dai 333197f1e196SWei Dai diag = rte_eth_dev_reset(pi); 333297f1e196SWei Dai if (diag == 0) { 333397f1e196SWei Dai port = &ports[pi]; 333497f1e196SWei Dai port->need_reconfig = 1; 333597f1e196SWei Dai port->need_reconfig_queues = 1; 333697f1e196SWei Dai } else { 333761a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. 
diag=%d\n", 333861a3b0e5SAndrew Rybchenko pi, diag); 333997f1e196SWei Dai } 334097f1e196SWei Dai } 334197f1e196SWei Dai 334297f1e196SWei Dai printf("Done\n"); 334397f1e196SWei Dai } 334497f1e196SWei Dai 334597f1e196SWei Dai void 3346edab33b1STetsuya Mukawa attach_port(char *identifier) 3347ce8d5614SIntel { 33484f1ed78eSThomas Monjalon portid_t pi; 3349c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3350ce8d5614SIntel 3351edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3352edab33b1STetsuya Mukawa 3353edab33b1STetsuya Mukawa if (identifier == NULL) { 335461a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3355edab33b1STetsuya Mukawa return; 3356ce8d5614SIntel } 3357ce8d5614SIntel 335875b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3359c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3360edab33b1STetsuya Mukawa return; 3361c9cce428SThomas Monjalon } 3362c9cce428SThomas Monjalon 33634f1ed78eSThomas Monjalon /* first attach mode: event */ 33644f1ed78eSThomas Monjalon if (setup_on_probe_event) { 33654f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 33664f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 33674f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 33684f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 33694f1ed78eSThomas Monjalon setup_attached_port(pi); 33704f1ed78eSThomas Monjalon return; 33714f1ed78eSThomas Monjalon } 33724f1ed78eSThomas Monjalon 33734f1ed78eSThomas Monjalon /* second attach mode: iterator */ 337486fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 33754f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 337686fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 337786fa5de1SThomas Monjalon continue; /* port was already attached before */ 3378c9cce428SThomas Monjalon setup_attached_port(pi); 3379c9cce428SThomas Monjalon } 338086fa5de1SThomas Monjalon } 3381c9cce428SThomas Monjalon 3382c9cce428SThomas Monjalon static void 3383c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3384c9cce428SThomas Monjalon { 3385c9cce428SThomas Monjalon unsigned int socket_id; 338634fc1051SIvan Ilchenko int ret; 3387edab33b1STetsuya Mukawa 3388931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 338929841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 3390931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 339129841336SPhil Yang socket_id = socket_ids[0]; 3392931126baSBernard Iremonger reconfig(pi, socket_id); 339334fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 339434fc1051SIvan Ilchenko if (ret != 0) 339561a3b0e5SAndrew Rybchenko fprintf(stderr, 339661a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 339734fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3398edab33b1STetsuya Mukawa 33994f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 34004f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 34014f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 34024f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3403edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3404edab33b1STetsuya Mukawa 3405edab33b1STetsuya Mukawa printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); 3406edab33b1STetsuya Mukawa printf("Done\n"); 3407edab33b1STetsuya Mukawa } 3408edab33b1STetsuya Mukawa 34090654d4a8SThomas Monjalon static void 34100654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 34115f4ec54fSChen Jing D(Mark) { 3412f8e5baa2SThomas Monjalon portid_t sibling; 3413f8e5baa2SThomas Monjalon 3414f8e5baa2SThomas Monjalon if (dev == NULL) { 341561a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3416f8e5baa2SThomas Monjalon return; 3417f8e5baa2SThomas Monjalon } 3418f8e5baa2SThomas Monjalon 34190654d4a8SThomas Monjalon printf("Removing a device...\n"); 3420938a184aSAdrien Mazarguil 34212a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 34222a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 34232a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 342461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 342561a3b0e5SAndrew Rybchenko sibling); 34262a449871SThomas Monjalon return; 34272a449871SThomas Monjalon } 34284b27989dSDmitry Kozlyuk flush_port_owned_resources(sibling); 34292a449871SThomas Monjalon } 34302a449871SThomas Monjalon } 34312a449871SThomas Monjalon 343275b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3433ec5ecd7eSDavid Marchand TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev)); 3434edab33b1STetsuya Mukawa return; 34353070419eSGaetan Rivet } 34364f1de450SThomas Monjalon remove_invalid_ports(); 343703ce2c53SMatan Azrad 34380654d4a8SThomas Monjalon printf("Device is detached\n"); 3439f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3440edab33b1STetsuya Mukawa printf("Done\n"); 3441edab33b1STetsuya Mukawa return; 34425f4ec54fSChen Jing D(Mark) } 34435f4ec54fSChen Jing D(Mark) 3444af75078fSIntel void 34450654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 34460654d4a8SThomas Monjalon { 34470a0821bcSPaulis Gributs int ret; 34480a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 34490a0821bcSPaulis Gributs 34500654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 34510654d4a8SThomas Monjalon return; 34520654d4a8SThomas Monjalon 34530654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 34540654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 345561a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 34560654d4a8SThomas Monjalon return; 34570654d4a8SThomas Monjalon } 345861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 34590654d4a8SThomas Monjalon } 34600654d4a8SThomas Monjalon 34610a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 34620a0821bcSPaulis Gributs if (ret != 0) { 34630a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 34640a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 34650a0821bcSPaulis Gributs port_id); 34660a0821bcSPaulis Gributs return; 34670a0821bcSPaulis Gributs } 34680a0821bcSPaulis Gributs detach_device(dev_info.device); 34690654d4a8SThomas Monjalon } 34700654d4a8SThomas Monjalon 34710654d4a8SThomas Monjalon void 34725edee5f6SThomas Monjalon detach_devargs(char *identifier) 347355e51c96SNithin Dabilpuram { 347455e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 347555e51c96SNithin Dabilpuram struct rte_devargs da; 347655e51c96SNithin Dabilpuram portid_t port_id; 347755e51c96SNithin Dabilpuram 347855e51c96SNithin Dabilpuram printf("Removing a device...\n"); 347955e51c96SNithin Dabilpuram 
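/*
 * Editor's note - an illustrative sketch, not part of the original testpmd
 * source: the probe/remove pairing that attach_port() and detach_device()
 * rely on, reduced to one hypothetical vdev. The devargs string
 * "net_null0" is an assumption made for the example; any device that can
 * be hotplugged would do, and error handling is trimmed for brevity.
 *
 *	uint16_t port_id;
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_dev_probe("net_null0") == 0 &&
 *	    rte_eth_dev_get_port_by_name("net_null0", &port_id) == 0 &&
 *	    rte_eth_dev_info_get(port_id, &info) == 0)
 *		rte_dev_remove(info.device);	// detach it again
 */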
348055e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 348155e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 348261a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 348355e51c96SNithin Dabilpuram return; 348455e51c96SNithin Dabilpuram } 348555e51c96SNithin Dabilpuram 348655e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 348755e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 348855e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 348961a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 349061a3b0e5SAndrew Rybchenko port_id); 3491149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 349264051bb1SXueming Li rte_devargs_reset(&da); 349355e51c96SNithin Dabilpuram return; 349455e51c96SNithin Dabilpuram } 34954b27989dSDmitry Kozlyuk flush_port_owned_resources(port_id); 349655e51c96SNithin Dabilpuram } 349755e51c96SNithin Dabilpuram } 349855e51c96SNithin Dabilpuram 3499148c51a3SDavid Marchand if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) { 350055e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 3501148c51a3SDavid Marchand da.name, rte_bus_name(da.bus)); 350264051bb1SXueming Li rte_devargs_reset(&da); 350355e51c96SNithin Dabilpuram return; 350455e51c96SNithin Dabilpuram } 350555e51c96SNithin Dabilpuram 350655e51c96SNithin Dabilpuram remove_invalid_ports(); 350755e51c96SNithin Dabilpuram 350855e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 350955e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 351055e51c96SNithin Dabilpuram printf("Done\n"); 351164051bb1SXueming Li rte_devargs_reset(&da); 351255e51c96SNithin Dabilpuram } 351355e51c96SNithin Dabilpuram 351455e51c96SNithin Dabilpuram void 3515af75078fSIntel pmd_test_exit(void) 3516af75078fSIntel { 3517af75078fSIntel portid_t pt_id; 351826cbb419SViacheslav Ovsiienko unsigned int i; 3519fb73e096SJeff Guo int ret; 3520af75078fSIntel 35218210ec25SPablo de Lara if (test_done == 0) 35228210ec25SPablo de Lara stop_packet_forwarding(); 35238210ec25SPablo de Lara 3524761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 352526cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 35263a0968c8SShahaf Shuler if (mempools[i]) { 35273a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 35283a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 35293a0968c8SShahaf Shuler NULL); 35303a0968c8SShahaf Shuler } 35313a0968c8SShahaf Shuler } 3532761f7ae1SJie Zhou #endif 3533d3a274ceSZhihong Wang if (ports != NULL) { 3534d3a274ceSZhihong Wang no_link_check = 1; 35357d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 353608fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3537af75078fSIntel fflush(stdout); 3538d3a274ceSZhihong Wang stop_port(pt_id); 353908fd782bSCristian Dumitrescu } 354008fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 354108fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 354208fd782bSCristian Dumitrescu fflush(stdout); 3543d3a274ceSZhihong Wang close_port(pt_id); 3544af75078fSIntel } 3545d3a274ceSZhihong Wang } 3546fb73e096SJeff Guo 3547fb73e096SJeff Guo if (hot_plug) { 3548fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 35492049c511SJeff Guo if (ret) { 3550fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3551fb73e096SJeff Guo "fail to stop device event monitor."); 35522049c511SJeff Guo return; 35532049c511SJeff Guo } 
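/*
 * Editor's note - an illustrative sketch, not part of the original testpmd
 * source: the hot-plug teardown in this function undoes an enable-side
 * sequence along these lines (the exact call order here is an assumption;
 * the --hot-plug initialization elsewhere in this file is authoritative):
 *
 *	if (rte_dev_event_monitor_start() == 0 &&
 *	    rte_dev_event_callback_register(NULL, dev_event_callback,
 *			NULL) == 0 &&
 *	    rte_dev_hotplug_handle_enable() == 0)
 *		hot_plug = 1;	// hypothetical flag use in this sketch
 */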
3554fb73e096SJeff Guo 35552049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3556cc1bf307SJeff Guo dev_event_callback, NULL); 35572049c511SJeff Guo if (ret < 0) { 3558fb73e096SJeff Guo RTE_LOG(ERR, EAL, 35592049c511SJeff Guo "fail to unregister device event callback.\n"); 35602049c511SJeff Guo return; 35612049c511SJeff Guo } 35622049c511SJeff Guo 35632049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 35642049c511SJeff Guo if (ret) { 35652049c511SJeff Guo RTE_LOG(ERR, EAL, 35662049c511SJeff Guo "fail to disable hotplug handling.\n"); 35672049c511SJeff Guo return; 35682049c511SJeff Guo } 3569fb73e096SJeff Guo } 357026cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3571401b744dSShahaf Shuler if (mempools[i]) 3572a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3573401b744dSShahaf Shuler } 357463b72657SIvan Ilchenko free(xstats_display); 3575fb73e096SJeff Guo 3576d3a274ceSZhihong Wang printf("\nBye...\n"); 3577af75078fSIntel } 3578af75078fSIntel 3579af75078fSIntel typedef void (*cmd_func_t)(void); 3580af75078fSIntel struct pmd_test_command { 3581af75078fSIntel const char *cmd_name; 3582af75078fSIntel cmd_func_t cmd_func; 3583af75078fSIntel }; 3584af75078fSIntel 3585ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3586af75078fSIntel static void 3587edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3588af75078fSIntel { 3589ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3590ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3591f8244c63SZhiyong Yang portid_t portid; 3592f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3593ce8d5614SIntel struct rte_eth_link link; 3594e661a08bSIgor Romanov int ret; 3595ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3596ce8d5614SIntel 3597ce8d5614SIntel printf("Checking link statuses...\n"); 3598ce8d5614SIntel fflush(stdout); 3599ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3600ce8d5614SIntel all_ports_up = 1; 36017d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3602ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3603ce8d5614SIntel continue; 3604ce8d5614SIntel memset(&link, 0, sizeof(link)); 3605e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3606e661a08bSIgor Romanov if (ret < 0) { 3607e661a08bSIgor Romanov all_ports_up = 0; 3608e661a08bSIgor Romanov if (print_flag == 1) 360961a3b0e5SAndrew Rybchenko fprintf(stderr, 361061a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3611e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3612e661a08bSIgor Romanov continue; 3613e661a08bSIgor Romanov } 3614ce8d5614SIntel /* print link status if flag set */ 3615ce8d5614SIntel if (print_flag == 1) { 3616ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3617ba5509a6SIvan Dyukov sizeof(link_status), &link); 3618ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3619ce8d5614SIntel continue; 3620ce8d5614SIntel } 3621ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3622295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3623ce8d5614SIntel all_ports_up = 0; 3624ce8d5614SIntel break; 3625ce8d5614SIntel } 3626ce8d5614SIntel } 3627ce8d5614SIntel /* after finally printing all link status, get out */ 3628ce8d5614SIntel if (print_flag == 1) 3629ce8d5614SIntel break; 3630ce8d5614SIntel 3631ce8d5614SIntel if (all_ports_up == 0) { 3632ce8d5614SIntel fflush(stdout); 3633ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 
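/*
 * Editor's note - an illustrative sketch, not part of the original testpmd
 * source: the loop above polls for at most MAX_CHECK_TIME * CHECK_INTERVAL
 * = 90 * 100 ms = 9 s. The same bounded-polling pattern for a single port;
 * the helper name example_wait_link_up() is an assumption made for the
 * example.
 *
 *	static int
 *	example_wait_link_up(uint16_t port_id)
 *	{
 *		struct rte_eth_link link;
 *		int n;
 *
 *		for (n = 0; n <= 90; n++) {
 *			memset(&link, 0, sizeof(link));
 *			if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *			    link.link_status == RTE_ETH_LINK_UP)
 *				return 0;	// link came up
 *			rte_delay_ms(100);
 *		}
 *		return -1;	// still down after ~9 s
 *	}
 */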
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		if (lsc_interrupt)
			break;
	}
}

static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;
		close_port(port_id);
		detach_device(device); /* might be already removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
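		/*
		 * The port cannot be detached from within this interrupt
		 * callback, so schedule rmv_port_callback() to run from an
		 * EAL alarm 100 ms (100000 us) from now instead.
		 */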
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
		uint16_t rxq_id;
		int ret;

		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
		for (rxq_id = 0; ; rxq_id++) {
			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
							    NULL);
			if (ret <= 0)
				break;
			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
			       port_id, rxq_id);

#ifdef RTE_NET_MLX5
			mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
#endif
		}
		break;
	}
	default:
		break;
	}
	return 0;
}

static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
	     event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
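
/*
 * Unlike eth_event_callback() above, which handles per-port ethdev
 * events, this callback receives bus-level device events
 * (RTE_DEV_EVENT_ADD/REMOVE) from the EAL device event monitor.
 */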
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked from the EAL
		 * interrupt callback, the interrupt callback must finish
		 * before it can be unregistered when detaching the device.
		 * So return from this callback quickly and detach the
		 * device via a deferred removal instead. This is a
		 * workaround: once device detaching is moved into the EAL,
		 * the deferred removal can be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: after finishing kernel driver binding,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}

static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rxq[qid].conf.offloads;
		port->rxq[qid].conf = port->dev_info.default_rxconf;
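
		/*
		 * With a non-zero rxq_share (the --rxq-share option) and a
		 * device advertising RTE_ETH_DEV_CAPA_RXQ_SHARE, ports are
		 * grouped rxq_share to a share group, and queue qid of
		 * every port in the group maps to the same shared queue.
		 */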
		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rxq[qid].conf.offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->txq[qid].conf.offloads;
		port->txq[qid].conf = port->dev_info.default_txconf;
		if (offloads != 0)
			port->txq[qid].conf.offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}

/*
 * Helper function to set MTU from frame size
 *
 * port->dev_info should be set before calling this function.
 *
 * return 0 on success, negative on error
 */
int
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint16_t mtu, new_mtu;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		printf("Failed to get MTU for port %u\n", portid);
		return -1;
	}
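
	/*
	 * The new MTU is the maximum Rx frame size minus the L2 overhead
	 * (header, FCS and, depending on the device, VLAN tags) reported
	 * for the device. For example, a standard 1518-byte Ethernet frame
	 * with an 18-byte overhead (14-byte header + 4-byte FCS) yields
	 * the classic 1500-byte MTU.
	 */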
	new_mtu = max_rx_pktlen - eth_overhead;

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
					~RTE_ETH_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rxq[i].conf.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;
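
		/*
		 * Link-state-change and removal interrupts are enabled
		 * only when both requested on the command line and
		 * advertised by the device in its dev_flags.
		 */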
		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}
	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}

const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
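
/*
 * Two DCB flavours are configured below: with virtualization
 * (DCB_VT_ENABLED), Rx/Tx use VMDq+DCB pools keyed by the VLAN tags
 * above; without it, plain DCB is combined with the port's current
 * RSS configuration.
 */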
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
			(enum rte_eth_rx_mq_mode)
			(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
			(enum rte_eth_rx_mq_mode)
			(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;

	return 0;
}
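
/*
 * Reached from the interactive command line; assuming the usual
 * testpmd syntax, a typical sequence is:
 *   testpmd> port stop 0
 *   testpmd> port config 0 dcb vt off 4 pfc on
 *   testpmd> port start 0
 */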
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support DCB.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	/* remove RSS HASH offload for DCB in vt mode */
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
		for (i = 0; i < nb_rxq; i++)
			rte_port->rxq[i].conf.offloads &=
				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
	}

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of Rx and Tx queues in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
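
/*
 * The ports[] array is allocated once for RTE_MAX_ETHPORTS entries,
 * so ports probed or hot-plugged later fit without reallocation.
 */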
static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
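
/*
 * Called from the non-interactive loop in main() every --stats-period
 * seconds; the escape sequences below clear the terminal (ESC[2J) and
 * home the cursor (ESC[1;1H) before the statistics are redrawn.
 */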
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif
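
	/*
	 * rte_eal_init() consumed `diag` arguments; skip past them so
	 * only the application's own options are handed to
	 * launch_args_parse().
	 */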
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();
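
	/*
	 * With the --hot-plug option, enable hotplug handling and start
	 * the EAL device event monitor before any port is started, so
	 * attach/detach events are caught from the beginning.
	 */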
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling\n");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring\n");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

#ifdef RTE_LIB_METRICS
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIB_CMDLINE
	if (init_cmdline() != 0)
		rte_exit(EXIT_FAILURE,
			 "Could not initialise cmdline context.\n");

	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}