/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif
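
/*
 * For reference: Linux encodes the requested huge page size in the upper
 * mmap() flag bits as log2(page size) << MAP_HUGE_SHIFT. A 2 MiB page,
 * for instance, gives log2(2097152) = 21, so the mapping flags become
 * HUGE_FLAG | (21 << HUGE_SHIFT); pagesz_flags() below computes exactly
 * this shift for an arbitrary page size.
 */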

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *   anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
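
/*
 * Note: the switch in mbuf_pool_create() below also handles
 * MP_ALLOC_XMEM_HUGE (xmem backed by anonymous hugepages) and
 * MP_ALLOC_XBUF (pinned external buffers created by setup_extbuf()).
 */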

/*
 * Store the sockets on which the memory pools used by each port
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};
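
/*
 * The active engine (cur_fwd_eng below) is selected with the
 * "--forward-mode" command-line option or the interactive
 * "set fwd <mode>" command; the list above is NULL-terminated so it can
 * be iterated when looking a mode up by name.
 */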

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * When running in a container, the process cannot be terminated while it
 * runs with the 'stats-period' option. Set a flag to exit the stats-period
 * loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if one of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
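
/*
 * RTE_PMD_PARAM_UNSET (-1) means "not set on the command line": when one
 * of the values above still holds it at port setup time, the
 * corresponding rte_eth_rxconf/rte_eth_txconf field is left at the driver
 * default reported by rte_eth_dev_info_get().
 */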

/*
 * Configurable number of packets buffered before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
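
/*
 * RTE_ETH_RSS_IP is itself a bitmask of the IPv4/IPv6 hash types; other
 * types can be OR-ed in, e.g. (illustrative only, the actual value comes
 * from the configuration commands/parameters):
 *	rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;
 */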

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask Ethernet events.
 * Default to all events except VF_MBOX, NEW and DESTROY.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
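
/*
 * For example, printing of VF mailbox events would be enabled by OR-ing
 * in the corresponding bit (e.g. via the "--print-event" parameter):
 *	event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */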

/*
 * Decide whether all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_eth_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 disables sharing.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static void
flow_pick_transfer_proxy_mp(uint16_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int ret;

	port->flow_transfer_proxy = port_id;

	if (!is_proc_primary())
		return;

	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
					   NULL);
	if (ret != 0) {
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
}
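
/*
 * The eth_dev_*_mp() helpers below wrap ethdev calls so that only the
 * primary process touches the device; secondary processes share the
 * primary's configuration and treat these operations as no-ops.
 */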

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is new (not yet discovered).
 * If it is new, return a positive value. If already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
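
/*
 * The external ("xmem") memory helpers below are not compiled on Windows,
 * where mmap()-based anonymous/hugepage allocation is unavailable; the
 * corresponding --mp-alloc modes only exist on Unix-like platforms.
 */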
#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
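
/*
 * Worked example for calc_mem_size() (illustrative values): with
 * pgsz = 2 MiB and an object size reported as 2176 B,
 * mbuf_per_pg = 2097152 / 2176 = 963. For nb_mbufs = 256000:
 * n_pages = 256000 / 963 + 1 = 266, so mbuf_mem = 266 * 2 MiB = 532 MiB
 * and the estimate is 128 MiB + 532 MiB = 660 MiB.
 */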

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
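
/*
 * Note: with huge = true the mmap() above succeeds only if the kernel has
 * free hugepages of the requested size; on failure, create_extmem() below
 * simply falls through to the next supported page size.
 */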

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}
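
/*
 * dma_map_cb()/dma_unmap_cb() are rte_mempool_mem_iter() callbacks: the
 * map variant is run on every memory chunk of an anonymous mempool right
 * after creation (see the MP_ALLOC_ANON case in mbuf_pool_create()), and
 * the unmap variant is expected to be run symmetrically at teardown.
 */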

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
#endif
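
/*
 * setup_extbuf() below backs the MP_ALLOC_XBUF mode: it reserves
 * IOVA-contiguous memzones and slices them into pinned external buffers
 * that rte_pktmbuf_pool_create_extbuf() attaches to the mbufs.
 */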

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
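
/*
 * Sizing example for setup_extbuf() (illustrative values): with the
 * default 2176-byte data size, elt_size = RTE_ALIGN_CEIL(2176, 64) = 2176
 * and elt_num = EXTBUF_ZONE_SIZE / elt_size = 2097152 / 2176 = 963
 * buffers per 2 MiB zone, so 256000 mbufs need
 * zone_num = (256000 + 962) / 963 = 266 memzones.
 */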

/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
	{
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
		break;
	}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
	{
		int heap_socket;
		bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

		if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
			rte_exit(EXIT_FAILURE, "Could not create external memory\n");

		heap_socket =
			rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
		if (heap_socket < 0)
			rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size,
				heap_socket);
		break;
	}
#endif
	case MP_ALLOC_XBUF:
	{
		struct rte_pktmbuf_extmem *ext_mem;
		unsigned int ext_num;

		ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
				       socket_id, pool_name, &ext_mem);
		if (ext_num == 0)
			rte_exit(EXIT_FAILURE,
				 "Can't create pinned data buffers\n");

		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create_extbuf
				(pool_name, nb_mbuf, mb_mempool_cache,
				 0, mbuf_seg_size, socket_id,
				 ext_mem, ext_num);
		free(ext_mem);
		break;
	}
	default:
	{
		rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
	}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
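
/*
 * Note: in a secondary process mbuf_pool_create() does not create
 * anything; it looks the pool up by name (mbuf_poolname_build()) and
 * fails hard if the primary process has not created it yet.
 */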
RTE_EXEC_ENV_WINDOWS 121824427bb9SOlivier Matz err: 1219761f7ae1SJie Zhou #endif 1220af75078fSIntel if (rte_mp == NULL) { 1221d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1222d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1223d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1224148f963fSBruce Richardson } else if (verbose_level > 0) { 1225591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1226af75078fSIntel } 1227401b744dSShahaf Shuler return rte_mp; 1228af75078fSIntel } 1229af75078fSIntel 123020a0286fSLiu Xiaofeng /* 123120a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 123220a0286fSLiu Xiaofeng * if valid, return 0, else return -1 123320a0286fSLiu Xiaofeng */ 123420a0286fSLiu Xiaofeng static int 123520a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 123620a0286fSLiu Xiaofeng { 123720a0286fSLiu Xiaofeng static int warning_once = 0; 123820a0286fSLiu Xiaofeng 1239c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 124020a0286fSLiu Xiaofeng if (!warning_once && numa_support) 124161a3b0e5SAndrew Rybchenko fprintf(stderr, 124261a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 124320a0286fSLiu Xiaofeng warning_once = 1; 124420a0286fSLiu Xiaofeng return -1; 124520a0286fSLiu Xiaofeng } 124620a0286fSLiu Xiaofeng return 0; 124720a0286fSLiu Xiaofeng } 124820a0286fSLiu Xiaofeng 12493f7311baSWei Dai /* 12503f7311baSWei Dai * Get the allowed maximum number of RX queues. 12513f7311baSWei Dai * *pid return the port id which has minimal value of 12523f7311baSWei Dai * max_rx_queues in all ports. 12533f7311baSWei Dai */ 12543f7311baSWei Dai queueid_t 12553f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 12563f7311baSWei Dai { 12579e6b36c3SDavid Marchand queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; 12586f51deb9SIvan Ilchenko bool max_rxq_valid = false; 12593f7311baSWei Dai portid_t pi; 12603f7311baSWei Dai struct rte_eth_dev_info dev_info; 12613f7311baSWei Dai 12623f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 12636f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 12646f51deb9SIvan Ilchenko continue; 12656f51deb9SIvan Ilchenko 12666f51deb9SIvan Ilchenko max_rxq_valid = true; 12673f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 12683f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 12693f7311baSWei Dai *pid = pi; 12703f7311baSWei Dai } 12713f7311baSWei Dai } 12726f51deb9SIvan Ilchenko return max_rxq_valid ? allowed_max_rxq : 0; 12733f7311baSWei Dai } 12743f7311baSWei Dai 12753f7311baSWei Dai /* 12763f7311baSWei Dai * Check input rxq is valid or not. 12773f7311baSWei Dai * If input rxq is not greater than any of maximum number 12783f7311baSWei Dai * of RX queues of all ports, it is valid. 
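 * For example, with two ports reporting max_rx_queues of 16 and 8
 * (hypothetical capabilities), rxq = 8 is accepted while rxq = 9 is
 * rejected against the 8-queue port.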
12793f7311baSWei Dai * if valid, return 0, else return -1 12803f7311baSWei Dai */ 12813f7311baSWei Dai int 12823f7311baSWei Dai check_nb_rxq(queueid_t rxq) 12833f7311baSWei Dai { 12843f7311baSWei Dai queueid_t allowed_max_rxq; 12853f7311baSWei Dai portid_t pid = 0; 12863f7311baSWei Dai 12873f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 12883f7311baSWei Dai if (rxq > allowed_max_rxq) { 128961a3b0e5SAndrew Rybchenko fprintf(stderr, 129061a3b0e5SAndrew Rybchenko "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n", 129161a3b0e5SAndrew Rybchenko rxq, allowed_max_rxq, pid); 12923f7311baSWei Dai return -1; 12933f7311baSWei Dai } 12943f7311baSWei Dai return 0; 12953f7311baSWei Dai } 12963f7311baSWei Dai 129736db4f6cSWei Dai /* 129836db4f6cSWei Dai * Get the allowed maximum number of TX queues. 129936db4f6cSWei Dai * *pid return the port id which has minimal value of 130036db4f6cSWei Dai * max_tx_queues in all ports. 130136db4f6cSWei Dai */ 130236db4f6cSWei Dai queueid_t 130336db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 130436db4f6cSWei Dai { 13059e6b36c3SDavid Marchand queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; 13066f51deb9SIvan Ilchenko bool max_txq_valid = false; 130736db4f6cSWei Dai portid_t pi; 130836db4f6cSWei Dai struct rte_eth_dev_info dev_info; 130936db4f6cSWei Dai 131036db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 13116f51deb9SIvan Ilchenko if (eth_dev_info_get_print_err(pi, &dev_info) != 0) 13126f51deb9SIvan Ilchenko continue; 13136f51deb9SIvan Ilchenko 13146f51deb9SIvan Ilchenko max_txq_valid = true; 131536db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 131636db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 131736db4f6cSWei Dai *pid = pi; 131836db4f6cSWei Dai } 131936db4f6cSWei Dai } 13206f51deb9SIvan Ilchenko return max_txq_valid ? allowed_max_txq : 0; 132136db4f6cSWei Dai } 132236db4f6cSWei Dai 132336db4f6cSWei Dai /* 132436db4f6cSWei Dai * Check input txq is valid or not. 132536db4f6cSWei Dai * If input txq is not greater than any of maximum number 132636db4f6cSWei Dai * of TX queues of all ports, it is valid. 132736db4f6cSWei Dai * if valid, return 0, else return -1 132836db4f6cSWei Dai */ 132936db4f6cSWei Dai int 133036db4f6cSWei Dai check_nb_txq(queueid_t txq) 133136db4f6cSWei Dai { 133236db4f6cSWei Dai queueid_t allowed_max_txq; 133336db4f6cSWei Dai portid_t pid = 0; 133436db4f6cSWei Dai 133536db4f6cSWei Dai allowed_max_txq = get_allowed_max_nb_txq(&pid); 133636db4f6cSWei Dai if (txq > allowed_max_txq) { 133761a3b0e5SAndrew Rybchenko fprintf(stderr, 133861a3b0e5SAndrew Rybchenko "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n", 133961a3b0e5SAndrew Rybchenko txq, allowed_max_txq, pid); 134036db4f6cSWei Dai return -1; 134136db4f6cSWei Dai } 134236db4f6cSWei Dai return 0; 134336db4f6cSWei Dai } 134436db4f6cSWei Dai 13451c69df45SOri Kam /* 134699e040d3SLijun Ou * Get the allowed maximum number of RXDs of every rx queue. 134799e040d3SLijun Ou * *pid return the port id which has minimal value of 134899e040d3SLijun Ou * max_rxd in all queues of all ports. 
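 * The result is the minimum over all ports of rx_desc_lim.nb_max;
 * e.g. ports reporting 4096 and 2048 descriptors (hypothetical limits)
 * yield 2048, with *pid set to the more constrained port.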
134999e040d3SLijun Ou  */
135099e040d3SLijun Ou static uint16_t
135199e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
135299e040d3SLijun Ou {
135399e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
135499e040d3SLijun Ou 	portid_t pi;
135599e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
135699e040d3SLijun Ou 
135799e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
135899e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
135999e040d3SLijun Ou 			continue;
136099e040d3SLijun Ou 
136199e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
136299e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
136399e040d3SLijun Ou 			*pid = pi;
136499e040d3SLijun Ou 		}
136599e040d3SLijun Ou 	}
136699e040d3SLijun Ou 	return allowed_max_rxd;
136799e040d3SLijun Ou }
136899e040d3SLijun Ou 
136999e040d3SLijun Ou /*
137099e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
137199e040d3SLijun Ou  * *pid return the port id which has minimal value of
137299e040d3SLijun Ou  * min_rxd in all queues of all ports.
137399e040d3SLijun Ou  */
137499e040d3SLijun Ou static uint16_t
137599e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
137699e040d3SLijun Ou {
137799e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
137899e040d3SLijun Ou 	portid_t pi;
137999e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
138099e040d3SLijun Ou 
138199e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
138299e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
138399e040d3SLijun Ou 			continue;
138499e040d3SLijun Ou 
138599e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
138699e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
138799e040d3SLijun Ou 			*pid = pi;
138899e040d3SLijun Ou 		}
138999e040d3SLijun Ou 	}
139099e040d3SLijun Ou 
139199e040d3SLijun Ou 	return allowed_min_rxd;
139299e040d3SLijun Ou }
139399e040d3SLijun Ou 
139499e040d3SLijun Ou /*
139599e040d3SLijun Ou  * Check input rxd is valid or not.
139699e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
139799e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
139899e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
139999e040d3SLijun Ou  * if valid, return 0, else return -1
140099e040d3SLijun Ou  */
140199e040d3SLijun Ou int
140299e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
140399e040d3SLijun Ou {
140499e040d3SLijun Ou 	uint16_t allowed_max_rxd;
140599e040d3SLijun Ou 	uint16_t allowed_min_rxd;
140699e040d3SLijun Ou 	portid_t pid = 0;
140799e040d3SLijun Ou 
140899e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
140999e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
141061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
141161a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
141261a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
141399e040d3SLijun Ou 		return -1;
141499e040d3SLijun Ou 	}
141599e040d3SLijun Ou 
141699e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
141799e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
141861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
141961a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
142061a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
142199e040d3SLijun Ou 		return -1;
142299e040d3SLijun Ou 	}
142399e040d3SLijun Ou 
142499e040d3SLijun Ou 	return 0;
142599e040d3SLijun Ou }
142699e040d3SLijun Ou 
142799e040d3SLijun Ou /*
142899e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
142999e040d3SLijun Ou  * *pid return the port id which has minimal value of
143099e040d3SLijun Ou  * max_txd in every tx queue.
143199e040d3SLijun Ou  */
143299e040d3SLijun Ou static uint16_t
143399e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
143499e040d3SLijun Ou {
143599e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
143699e040d3SLijun Ou 	portid_t pi;
143799e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
143899e040d3SLijun Ou 
143999e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
144099e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
144199e040d3SLijun Ou 			continue;
144299e040d3SLijun Ou 
144399e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
144499e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
144599e040d3SLijun Ou 			*pid = pi;
144699e040d3SLijun Ou 		}
144799e040d3SLijun Ou 	}
144899e040d3SLijun Ou 	return allowed_max_txd;
144999e040d3SLijun Ou }
145099e040d3SLijun Ou 
145199e040d3SLijun Ou /*
145299e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
145399e040d3SLijun Ou  * *pid return the port id which has minimal value of
145499e040d3SLijun Ou  * min_txd in every tx queue.
145599e040d3SLijun Ou  */
145699e040d3SLijun Ou static uint16_t
145799e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
145899e040d3SLijun Ou {
145999e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
146099e040d3SLijun Ou 	portid_t pi;
146199e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
146299e040d3SLijun Ou 
146399e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
146499e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
146599e040d3SLijun Ou 			continue;
146699e040d3SLijun Ou 
146799e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
146899e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
146999e040d3SLijun Ou 			*pid = pi;
147099e040d3SLijun Ou 		}
147199e040d3SLijun Ou 	}
147299e040d3SLijun Ou 
147399e040d3SLijun Ou 	return allowed_min_txd;
147499e040d3SLijun Ou }
147599e040d3SLijun Ou 
147699e040d3SLijun Ou /*
147799e040d3SLijun Ou  * Check input txd is valid or not.
147899e040d3SLijun Ou  * If input txd is not greater than any of maximum number of TXDs
147999e040d3SLijun Ou  * and not less than any of minimal number of TXDs of every Tx queue, it is valid.
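 * For example, with tx_desc_lim.nb_min = 64 and tx_desc_lim.nb_max = 4096
 * as the tightest bounds across ports (hypothetical limits), any txd in
 * [64, 4096] is accepted.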
148099e040d3SLijun Ou * if valid, return 0, else return -1 148199e040d3SLijun Ou */ 148299e040d3SLijun Ou int 148399e040d3SLijun Ou check_nb_txd(queueid_t txd) 148499e040d3SLijun Ou { 148599e040d3SLijun Ou uint16_t allowed_max_txd; 148699e040d3SLijun Ou uint16_t allowed_min_txd; 148799e040d3SLijun Ou portid_t pid = 0; 148899e040d3SLijun Ou 148999e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid); 149099e040d3SLijun Ou if (txd > allowed_max_txd) { 149161a3b0e5SAndrew Rybchenko fprintf(stderr, 149261a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n", 149361a3b0e5SAndrew Rybchenko txd, allowed_max_txd, pid); 149499e040d3SLijun Ou return -1; 149599e040d3SLijun Ou } 149699e040d3SLijun Ou 149799e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid); 149899e040d3SLijun Ou if (txd < allowed_min_txd) { 149961a3b0e5SAndrew Rybchenko fprintf(stderr, 150061a3b0e5SAndrew Rybchenko "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n", 150161a3b0e5SAndrew Rybchenko txd, allowed_min_txd, pid); 150299e040d3SLijun Ou return -1; 150399e040d3SLijun Ou } 150499e040d3SLijun Ou return 0; 150599e040d3SLijun Ou } 150699e040d3SLijun Ou 150799e040d3SLijun Ou 150899e040d3SLijun Ou /* 15091c69df45SOri Kam * Get the allowed maximum number of hairpin queues. 15101c69df45SOri Kam * *pid return the port id which has minimal value of 15111c69df45SOri Kam * max_hairpin_queues in all ports. 15121c69df45SOri Kam */ 15131c69df45SOri Kam queueid_t 15141c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid) 15151c69df45SOri Kam { 15169e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; 15171c69df45SOri Kam portid_t pi; 15181c69df45SOri Kam struct rte_eth_hairpin_cap cap; 15191c69df45SOri Kam 15201c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) { 15211c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { 15221c69df45SOri Kam *pid = pi; 15231c69df45SOri Kam return 0; 15241c69df45SOri Kam } 15251c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) { 15261c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues; 15271c69df45SOri Kam *pid = pi; 15281c69df45SOri Kam } 15291c69df45SOri Kam } 15301c69df45SOri Kam return allowed_max_hairpinq; 15311c69df45SOri Kam } 15321c69df45SOri Kam 15331c69df45SOri Kam /* 15341c69df45SOri Kam * Check input hairpin is valid or not. 15351c69df45SOri Kam * If input hairpin is not greater than any of maximum number 15361c69df45SOri Kam * of hairpin queues of all ports, it is valid. 
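 * Note that any port lacking hairpin support makes
 * get_allowed_max_nb_hairpinq() return 0, so every nonzero hairpinq
 * request is then rejected.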
15371c69df45SOri Kam * if valid, return 0, else return -1 15381c69df45SOri Kam */ 15391c69df45SOri Kam int 15401c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 15411c69df45SOri Kam { 15421c69df45SOri Kam queueid_t allowed_max_hairpinq; 15431c69df45SOri Kam portid_t pid = 0; 15441c69df45SOri Kam 15451c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 15461c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 154761a3b0e5SAndrew Rybchenko fprintf(stderr, 154861a3b0e5SAndrew Rybchenko "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", 15491c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 15501c69df45SOri Kam return -1; 15511c69df45SOri Kam } 15521c69df45SOri Kam return 0; 15531c69df45SOri Kam } 15541c69df45SOri Kam 15551bb4a528SFerruh Yigit static int 15561bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info) 15571bb4a528SFerruh Yigit { 15581bb4a528SFerruh Yigit uint32_t eth_overhead; 15591bb4a528SFerruh Yigit 15601bb4a528SFerruh Yigit if (dev_info->max_mtu != UINT16_MAX && 15611bb4a528SFerruh Yigit dev_info->max_rx_pktlen > dev_info->max_mtu) 15621bb4a528SFerruh Yigit eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; 15631bb4a528SFerruh Yigit else 15641bb4a528SFerruh Yigit eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 15651bb4a528SFerruh Yigit 15661bb4a528SFerruh Yigit return eth_overhead; 15671bb4a528SFerruh Yigit } 15681bb4a528SFerruh Yigit 1569af75078fSIntel static void 1570b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id) 1571b6b8a1ebSViacheslav Ovsiienko { 1572b6b8a1ebSViacheslav Ovsiienko struct rte_port *port = &ports[pid]; 1573b6b8a1ebSViacheslav Ovsiienko int ret; 1574b6b8a1ebSViacheslav Ovsiienko int i; 1575b6b8a1ebSViacheslav Ovsiienko 1576f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(pid); 15771179f05cSIvan Malov flow_pick_transfer_proxy_mp(pid); 1578f6d8a6d3SIvan Malov 1579b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode = tx_mode; 1580b6b8a1ebSViacheslav Ovsiienko port->dev_conf.rxmode = rx_mode; 1581b6b8a1ebSViacheslav Ovsiienko 1582b6b8a1ebSViacheslav Ovsiienko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 1583b6b8a1ebSViacheslav Ovsiienko if (ret != 0) 1584b6b8a1ebSViacheslav Ovsiienko rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); 1585b6b8a1ebSViacheslav Ovsiienko 1586295968d1SFerruh Yigit if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) 1587b6b8a1ebSViacheslav Ovsiienko port->dev_conf.txmode.offloads &= 1588295968d1SFerruh Yigit ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1589b6b8a1ebSViacheslav Ovsiienko 1590b6b8a1ebSViacheslav Ovsiienko /* Apply Rx offloads configuration */ 1591b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_rx_queues; i++) 1592b6b8a1ebSViacheslav Ovsiienko port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads; 1593b6b8a1ebSViacheslav Ovsiienko /* Apply Tx offloads configuration */ 1594b6b8a1ebSViacheslav Ovsiienko for (i = 0; i < port->dev_info.max_tx_queues; i++) 1595b6b8a1ebSViacheslav Ovsiienko port->tx_conf[i].offloads = port->dev_conf.txmode.offloads; 1596b6b8a1ebSViacheslav Ovsiienko 1597b6b8a1ebSViacheslav Ovsiienko if (eth_link_speed) 1598b6b8a1ebSViacheslav Ovsiienko port->dev_conf.link_speeds = eth_link_speed; 1599b6b8a1ebSViacheslav Ovsiienko 16001bb4a528SFerruh Yigit if (max_rx_pkt_len) 16011bb4a528SFerruh Yigit port->dev_conf.rxmode.mtu = max_rx_pkt_len - 16021bb4a528SFerruh Yigit get_eth_overhead(&port->dev_info); 
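	/*
	 * Worked example of the conversion above, assuming standard
	 * Ethernet overhead: with --max-pkt-len=1518 and an overhead of
	 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, the
	 * configured MTU becomes 1518 - 18 = 1500.
	 */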
16031bb4a528SFerruh Yigit 1604b6b8a1ebSViacheslav Ovsiienko /* set flag to initialize port/queue */ 1605b6b8a1ebSViacheslav Ovsiienko port->need_reconfig = 1; 1606b6b8a1ebSViacheslav Ovsiienko port->need_reconfig_queues = 1; 1607b6b8a1ebSViacheslav Ovsiienko port->socket_id = socket_id; 1608b6b8a1ebSViacheslav Ovsiienko port->tx_metadata = 0; 1609b6b8a1ebSViacheslav Ovsiienko 1610b6b8a1ebSViacheslav Ovsiienko /* 1611b6b8a1ebSViacheslav Ovsiienko * Check for maximum number of segments per MTU. 1612b6b8a1ebSViacheslav Ovsiienko * Accordingly update the mbuf data size. 1613b6b8a1ebSViacheslav Ovsiienko */ 1614b6b8a1ebSViacheslav Ovsiienko if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1615b6b8a1ebSViacheslav Ovsiienko port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 16161bb4a528SFerruh Yigit uint32_t eth_overhead = get_eth_overhead(&port->dev_info); 16171bb4a528SFerruh Yigit uint16_t mtu; 1618b6b8a1ebSViacheslav Ovsiienko 16191bb4a528SFerruh Yigit if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { 16201bb4a528SFerruh Yigit uint16_t data_size = (mtu + eth_overhead) / 16211bb4a528SFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max; 16221bb4a528SFerruh Yigit uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; 16231bb4a528SFerruh Yigit 16241bb4a528SFerruh Yigit if (buffer_size > mbuf_data_size[0]) { 16251bb4a528SFerruh Yigit mbuf_data_size[0] = buffer_size; 1626b6b8a1ebSViacheslav Ovsiienko TESTPMD_LOG(WARNING, 1627b6b8a1ebSViacheslav Ovsiienko "Configured mbuf size of the first segment %hu\n", 1628b6b8a1ebSViacheslav Ovsiienko mbuf_data_size[0]); 1629b6b8a1ebSViacheslav Ovsiienko } 1630b6b8a1ebSViacheslav Ovsiienko } 1631b6b8a1ebSViacheslav Ovsiienko } 16321bb4a528SFerruh Yigit } 1633b6b8a1ebSViacheslav Ovsiienko 1634b6b8a1ebSViacheslav Ovsiienko static void 1635af75078fSIntel init_config(void) 1636af75078fSIntel { 1637ce8d5614SIntel portid_t pid; 1638af75078fSIntel struct rte_mempool *mbp; 1639af75078fSIntel unsigned int nb_mbuf_per_pool; 1640af75078fSIntel lcoreid_t lc_id; 16416970401eSDavid Marchand #ifdef RTE_LIB_GRO 1642b7091f1dSJiayu Hu struct rte_gro_param gro_param; 16436970401eSDavid Marchand #endif 16446970401eSDavid Marchand #ifdef RTE_LIB_GSO 164552f38a20SJiayu Hu uint32_t gso_types; 16466970401eSDavid Marchand #endif 1647487f9a59SYulong Pei 1648af75078fSIntel /* Configuration of logical cores. 
*/ 1649af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1650af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1651fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1652af75078fSIntel if (fwd_lcores == NULL) { 1653ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1654ce8d5614SIntel "failed\n", nb_lcores); 1655af75078fSIntel } 1656af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1657af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1658af75078fSIntel sizeof(struct fwd_lcore), 1659fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1660af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1661ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1662ce8d5614SIntel "failed\n"); 1663af75078fSIntel } 1664af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1665af75078fSIntel } 1666af75078fSIntel 16677d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1668b6b8a1ebSViacheslav Ovsiienko uint32_t socket_id; 16696f51deb9SIvan Ilchenko 1670b6ea6408SIntel if (numa_support) { 1671b6b8a1ebSViacheslav Ovsiienko socket_id = port_numa[pid]; 1672b6b8a1ebSViacheslav Ovsiienko if (port_numa[pid] == NUMA_NO_CONFIG) { 1673b6b8a1ebSViacheslav Ovsiienko socket_id = rte_eth_dev_socket_id(pid); 167420a0286fSLiu Xiaofeng 167529841336SPhil Yang /* 167629841336SPhil Yang * if socket_id is invalid, 167729841336SPhil Yang * set to the first available socket. 167829841336SPhil Yang */ 167920a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 168029841336SPhil Yang socket_id = socket_ids[0]; 1681b6ea6408SIntel } 1682b6b8a1ebSViacheslav Ovsiienko } else { 1683b6b8a1ebSViacheslav Ovsiienko socket_id = (socket_num == UMA_NO_CONFIG) ? 1684b6b8a1ebSViacheslav Ovsiienko 0 : socket_num; 1685b6ea6408SIntel } 1686b6b8a1ebSViacheslav Ovsiienko /* Apply default TxRx configuration for all ports */ 1687b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(pid, socket_id); 1688ce8d5614SIntel } 16893ab64341SOlivier Matz /* 16903ab64341SOlivier Matz * Create pools of mbuf. 16913ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 16923ab64341SOlivier Matz * socket 0 memory by default. 16933ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 16943ab64341SOlivier Matz * 16953ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 16963ab64341SOlivier Matz * nb_txd can be configured at run time. 
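 * Unless --total-num-mbufs overrides it, the default per-pool size
 * computed below is (RTE_TEST_RX_DESC_MAX + nb_lcores * mb_mempool_cache
 * + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST) * RTE_MAX_ETHPORTS.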
16973ab64341SOlivier Matz */ 16983ab64341SOlivier Matz if (param_total_num_mbufs) 16993ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 17003ab64341SOlivier Matz else { 17013ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 17023ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 17033ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 17043ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 17053ab64341SOlivier Matz } 17063ab64341SOlivier Matz 1707b6ea6408SIntel if (numa_support) { 170826cbb419SViacheslav Ovsiienko uint8_t i, j; 1709ce8d5614SIntel 1710c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 171126cbb419SViacheslav Ovsiienko for (j = 0; j < mbuf_data_size_n; j++) 171226cbb419SViacheslav Ovsiienko mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = 171326cbb419SViacheslav Ovsiienko mbuf_pool_create(mbuf_data_size[j], 1714401b744dSShahaf Shuler nb_mbuf_per_pool, 171526cbb419SViacheslav Ovsiienko socket_ids[i], j); 17163ab64341SOlivier Matz } else { 171726cbb419SViacheslav Ovsiienko uint8_t i; 171826cbb419SViacheslav Ovsiienko 171926cbb419SViacheslav Ovsiienko for (i = 0; i < mbuf_data_size_n; i++) 172026cbb419SViacheslav Ovsiienko mempools[i] = mbuf_pool_create 172126cbb419SViacheslav Ovsiienko (mbuf_data_size[i], 1722401b744dSShahaf Shuler nb_mbuf_per_pool, 172326cbb419SViacheslav Ovsiienko socket_num == UMA_NO_CONFIG ? 172426cbb419SViacheslav Ovsiienko 0 : socket_num, i); 17253ab64341SOlivier Matz } 1726b6ea6408SIntel 1727b6ea6408SIntel init_port_config(); 17285886ae07SAdrien Mazarguil 17296970401eSDavid Marchand #ifdef RTE_LIB_GSO 1730295968d1SFerruh Yigit gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1731295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; 17326970401eSDavid Marchand #endif 17335886ae07SAdrien Mazarguil /* 17345886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 
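 * Each lcore is given the pool created for its own CPU socket when one
 * exists; otherwise it falls back to the pool at index (0, 0).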
17355886ae07SAdrien Mazarguil */ 17365886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 17378fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 173826cbb419SViacheslav Ovsiienko rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); 17398fd8bebcSAdrien Mazarguil 17405886ae07SAdrien Mazarguil if (mbp == NULL) 174126cbb419SViacheslav Ovsiienko mbp = mbuf_pool_find(0, 0); 17425886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 17436970401eSDavid Marchand #ifdef RTE_LIB_GSO 174452f38a20SJiayu Hu /* initialize GSO context */ 174552f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 174652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 174752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 174835b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 174935b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 175052f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 17516970401eSDavid Marchand #endif 17525886ae07SAdrien Mazarguil } 17535886ae07SAdrien Mazarguil 17540c0db76fSBernard Iremonger fwd_config_setup(); 1755b7091f1dSJiayu Hu 17566970401eSDavid Marchand #ifdef RTE_LIB_GRO 1757b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1758b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1759b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1760b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1761b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1762b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1763b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1764b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1765b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1766b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1767b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1768b7091f1dSJiayu Hu } 1769b7091f1dSJiayu Hu } 17706970401eSDavid Marchand #endif 1771ce8d5614SIntel } 1772ce8d5614SIntel 17732950a769SDeclan Doherty 17742950a769SDeclan Doherty void 1775a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 17762950a769SDeclan Doherty { 17772950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. 
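 * Re-applies the default per-port offload configuration; typically
 * invoked when a new port is attached at run time.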
*/ 1778b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(new_port_id, socket_id); 17792950a769SDeclan Doherty init_port_config(); 17802950a769SDeclan Doherty } 17812950a769SDeclan Doherty 17822950a769SDeclan Doherty 1783ce8d5614SIntel int 1784ce8d5614SIntel init_fwd_streams(void) 1785ce8d5614SIntel { 1786ce8d5614SIntel portid_t pid; 1787ce8d5614SIntel struct rte_port *port; 1788ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 17895a8fb55cSReshma Pattan queueid_t q; 1790ce8d5614SIntel 1791ce8d5614SIntel /* set socket id according to numa or not */ 17927d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1793ce8d5614SIntel port = &ports[pid]; 1794ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 179561a3b0e5SAndrew Rybchenko fprintf(stderr, 179661a3b0e5SAndrew Rybchenko "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n", 179761a3b0e5SAndrew Rybchenko nb_rxq, port->dev_info.max_rx_queues); 1798ce8d5614SIntel return -1; 1799ce8d5614SIntel } 1800ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 180161a3b0e5SAndrew Rybchenko fprintf(stderr, 180261a3b0e5SAndrew Rybchenko "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n", 180361a3b0e5SAndrew Rybchenko nb_txq, port->dev_info.max_tx_queues); 1804ce8d5614SIntel return -1; 1805ce8d5614SIntel } 180620a0286fSLiu Xiaofeng if (numa_support) { 180720a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 180820a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 180920a0286fSLiu Xiaofeng else { 1810b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 181120a0286fSLiu Xiaofeng 181229841336SPhil Yang /* 181329841336SPhil Yang * if socket_id is invalid, 181429841336SPhil Yang * set to the first available socket. 181529841336SPhil Yang */ 181620a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 181729841336SPhil Yang port->socket_id = socket_ids[0]; 181820a0286fSLiu Xiaofeng } 181920a0286fSLiu Xiaofeng } 1820b6ea6408SIntel else { 1821b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1822af75078fSIntel port->socket_id = 0; 1823b6ea6408SIntel else 1824b6ea6408SIntel port->socket_id = socket_num; 1825b6ea6408SIntel } 1826af75078fSIntel } 1827af75078fSIntel 18285a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 18295a8fb55cSReshma Pattan if (q == 0) { 183061a3b0e5SAndrew Rybchenko fprintf(stderr, 183161a3b0e5SAndrew Rybchenko "Fail: Cannot allocate fwd streams as number of queues is 0\n"); 18325a8fb55cSReshma Pattan return -1; 18335a8fb55cSReshma Pattan } 18345a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1835ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1836ce8d5614SIntel return 0; 1837ce8d5614SIntel /* clear the old */ 1838ce8d5614SIntel if (fwd_streams != NULL) { 1839ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1840ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1841ce8d5614SIntel continue; 1842ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1843ce8d5614SIntel fwd_streams[sm_id] = NULL; 1844af75078fSIntel } 1845ce8d5614SIntel rte_free(fwd_streams); 1846ce8d5614SIntel fwd_streams = NULL; 1847ce8d5614SIntel } 1848ce8d5614SIntel 1849ce8d5614SIntel /* init new */ 1850ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 18511f84c469SMatan Azrad if (nb_fwd_streams) { 1852ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 18531f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 18541f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1855ce8d5614SIntel if (fwd_streams == NULL) 18561f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 
18571f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 18581f84c469SMatan Azrad nb_fwd_streams); 1859ce8d5614SIntel 1860af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 18611f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 18621f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 18631f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1864ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 18651f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 18661f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 18671f84c469SMatan Azrad } 1868af75078fSIntel } 1869ce8d5614SIntel 1870ce8d5614SIntel return 0; 1871af75078fSIntel } 1872af75078fSIntel 1873af75078fSIntel static void 1874af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1875af75078fSIntel { 18767569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 187785de481aSHonnappa Nagarahalli uint64_t nb_burst; 18787569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 18797569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1880af75078fSIntel uint16_t nb_pkt; 18817569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 18827569b8c1SHonnappa Nagarahalli int i; 1883af75078fSIntel 1884af75078fSIntel /* 1885af75078fSIntel * First compute the total number of packet bursts and the 1886af75078fSIntel * two highest numbers of bursts of the same number of packets. 1887af75078fSIntel */ 18887569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 18897569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 18907569b8c1SHonnappa Nagarahalli 18917569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 18927569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 18937569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 18947569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 18957569b8c1SHonnappa Nagarahalli 18967569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. 
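 * e.g. a spread of {32 pkts: 900 bursts, 16 pkts: 80, 1 pkt: 20}
 * (hypothetical counters) reports the 32- and 16-packet entries and
 * folds the remainder into the trailing "of other" percentage.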
*/ 18976a8b64fdSEli Britstein for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { 1898af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 18997569b8c1SHonnappa Nagarahalli 1900af75078fSIntel if (nb_burst == 0) 1901af75078fSIntel continue; 19027569b8c1SHonnappa Nagarahalli 1903af75078fSIntel total_burst += nb_burst; 19047569b8c1SHonnappa Nagarahalli 19057569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 19067569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 19077569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1908fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1909fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 19107569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 19117569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 19127569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1913af75078fSIntel } 1914af75078fSIntel } 1915af75078fSIntel if (total_burst == 0) 1916af75078fSIntel return; 19177569b8c1SHonnappa Nagarahalli 19187569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 19197569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 19207569b8c1SHonnappa Nagarahalli if (i == 3) { 19217569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1922af75078fSIntel return; 1923af75078fSIntel } 19247569b8c1SHonnappa Nagarahalli 19257569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 19267569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 19277569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 19287569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1929af75078fSIntel return; 1930af75078fSIntel } 19317569b8c1SHonnappa Nagarahalli 19327569b8c1SHonnappa Nagarahalli burst_percent[i] = 19337569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 19347569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 19357569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 19367569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1937af75078fSIntel } 1938af75078fSIntel } 1939af75078fSIntel 1940af75078fSIntel static void 1941af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1942af75078fSIntel { 1943af75078fSIntel struct fwd_stream *fs; 1944af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1945af75078fSIntel 1946af75078fSIntel fs = fwd_streams[stream_id]; 1947af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1948af75078fSIntel (fs->fwd_dropped == 0)) 1949af75078fSIntel return; 1950af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1951af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1952af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1953af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1954c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1955c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1956af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1957af75078fSIntel 1958af75078fSIntel /* if checksum mode */ 1959af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1960c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1961c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1962c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 196358d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 196458d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 1965d139cf23SLance Richardson printf(" RX- bad outer IP checksum: 
%-14"PRIu64"\n", 1966d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 196794d65546SDavid Marchand } else { 196894d65546SDavid Marchand printf("\n"); 1969af75078fSIntel } 1970af75078fSIntel 19710e4b1963SDharmik Thakkar if (record_burst_stats) { 1972af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1973af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 19740e4b1963SDharmik Thakkar } 1975af75078fSIntel } 1976af75078fSIntel 197753324971SDavid Marchand void 197853324971SDavid Marchand fwd_stats_display(void) 197953324971SDavid Marchand { 198053324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 198153324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 198253324971SDavid Marchand struct { 198353324971SDavid Marchand struct fwd_stream *rx_stream; 198453324971SDavid Marchand struct fwd_stream *tx_stream; 198553324971SDavid Marchand uint64_t tx_dropped; 198653324971SDavid Marchand uint64_t rx_bad_ip_csum; 198753324971SDavid Marchand uint64_t rx_bad_l4_csum; 198853324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 1989d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 199053324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 199153324971SDavid Marchand uint64_t total_rx_dropped = 0; 199253324971SDavid Marchand uint64_t total_tx_dropped = 0; 199353324971SDavid Marchand uint64_t total_rx_nombuf = 0; 199453324971SDavid Marchand struct rte_eth_stats stats; 199553324971SDavid Marchand uint64_t fwd_cycles = 0; 199653324971SDavid Marchand uint64_t total_recv = 0; 199753324971SDavid Marchand uint64_t total_xmit = 0; 199853324971SDavid Marchand struct rte_port *port; 199953324971SDavid Marchand streamid_t sm_id; 200053324971SDavid Marchand portid_t pt_id; 200153324971SDavid Marchand int i; 200253324971SDavid Marchand 200353324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 200453324971SDavid Marchand 200553324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 200653324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 200753324971SDavid Marchand 200853324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 200953324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 201053324971SDavid Marchand fwd_stream_stats_display(sm_id); 201153324971SDavid Marchand } else { 201253324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 201353324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 201453324971SDavid Marchand } 201553324971SDavid Marchand 201653324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 201753324971SDavid Marchand 201853324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 201953324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 202053324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 202153324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2022d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2023d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 202453324971SDavid Marchand 2025bc700b67SDharmik Thakkar if (record_core_cycles) 202653324971SDavid Marchand fwd_cycles += fs->core_cycles; 202753324971SDavid Marchand } 202853324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 202953324971SDavid Marchand pt_id = fwd_ports_ids[i]; 203053324971SDavid Marchand port = &ports[pt_id]; 203153324971SDavid Marchand 203253324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 
203353324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 203453324971SDavid Marchand stats.opackets -= port->stats.opackets; 203553324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 203653324971SDavid Marchand stats.obytes -= port->stats.obytes; 203753324971SDavid Marchand stats.imissed -= port->stats.imissed; 203853324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 203953324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 204053324971SDavid Marchand 204153324971SDavid Marchand total_recv += stats.ipackets; 204253324971SDavid Marchand total_xmit += stats.opackets; 204353324971SDavid Marchand total_rx_dropped += stats.imissed; 204453324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 204553324971SDavid Marchand total_tx_dropped += stats.oerrors; 204653324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 204753324971SDavid Marchand 204853324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 204953324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 205053324971SDavid Marchand 205108dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 205208dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 205353324971SDavid Marchand stats.ipackets + stats.imissed); 205453324971SDavid Marchand 2055d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 205653324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 205753324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 205853324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 205953324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 206053324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 206153324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2062d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2063d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2064d139cf23SLance Richardson } 206553324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 206608dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 206708dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 206853324971SDavid Marchand } 206953324971SDavid Marchand 207008dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 207153324971SDavid Marchand "TX-total: %-"PRIu64"\n", 207253324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 207353324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 207453324971SDavid Marchand 20750e4b1963SDharmik Thakkar if (record_burst_stats) { 207653324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 207753324971SDavid Marchand pkt_burst_stats_display("RX", 207853324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 207953324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 208053324971SDavid Marchand pkt_burst_stats_display("TX", 208153324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 20820e4b1963SDharmik Thakkar } 208353324971SDavid Marchand 208453324971SDavid Marchand printf(" %s--------------------------------%s\n", 208553324971SDavid Marchand fwd_stats_border, fwd_stats_border); 208653324971SDavid Marchand } 208753324971SDavid Marchand 208853324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 208953324971SDavid Marchand "%s\n", 209053324971SDavid Marchand acc_stats_border, acc_stats_border); 209153324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: 
" 209253324971SDavid Marchand "%-"PRIu64"\n" 209353324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 209453324971SDavid Marchand "%-"PRIu64"\n", 209553324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 209653324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 209753324971SDavid Marchand if (total_rx_nombuf > 0) 209853324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 209953324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 210053324971SDavid Marchand "%s\n", 210153324971SDavid Marchand acc_stats_border, acc_stats_border); 2102bc700b67SDharmik Thakkar if (record_core_cycles) { 21034c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 21043a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 21053a164e00SPhil Yang uint64_t total_pkts = 0; 21063a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 21073a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 21083a164e00SPhil Yang total_pkts = total_xmit; 21093a164e00SPhil Yang else 21103a164e00SPhil Yang total_pkts = total_recv; 21113a164e00SPhil Yang 21121920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 21133a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 21144c0497b1SDharmik Thakkar " MHz Clock\n", 21153a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21163a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21174c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21183a164e00SPhil Yang } 2119bc700b67SDharmik Thakkar } 212053324971SDavid Marchand } 212153324971SDavid Marchand 212253324971SDavid Marchand void 212353324971SDavid Marchand fwd_stats_reset(void) 212453324971SDavid Marchand { 212553324971SDavid Marchand streamid_t sm_id; 212653324971SDavid Marchand portid_t pt_id; 212753324971SDavid Marchand int i; 212853324971SDavid Marchand 212953324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 213053324971SDavid Marchand pt_id = fwd_ports_ids[i]; 213153324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 213253324971SDavid Marchand } 213353324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 213453324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 213553324971SDavid Marchand 213653324971SDavid Marchand fs->rx_packets = 0; 213753324971SDavid Marchand fs->tx_packets = 0; 213853324971SDavid Marchand fs->fwd_dropped = 0; 213953324971SDavid Marchand fs->rx_bad_ip_csum = 0; 214053324971SDavid Marchand fs->rx_bad_l4_csum = 0; 214153324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2142d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 214353324971SDavid Marchand 214453324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 214553324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 214653324971SDavid Marchand fs->core_cycles = 0; 214753324971SDavid Marchand } 214853324971SDavid Marchand } 214953324971SDavid Marchand 2150af75078fSIntel static void 21517741e4cfSIntel flush_fwd_rx_queues(void) 2152af75078fSIntel { 2153af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2154af75078fSIntel portid_t rxp; 21557741e4cfSIntel portid_t port_id; 2156af75078fSIntel queueid_t rxq; 2157af75078fSIntel uint16_t nb_rx; 2158af75078fSIntel uint16_t i; 2159af75078fSIntel uint8_t j; 2160f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, 
	cur_tsc, timer_tsc = 0;
2161594302c7SJames Poole 	uint64_t timer_period;
2162f487715fSReshma Pattan 
2163a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2164a550baf2SMin Hu (Connor) 		printf("multi-process does not support flushing fwd Rx queues, skipping\n");
2165a550baf2SMin Hu (Connor) 		return;
2166a550baf2SMin Hu (Connor) 	}
2167a550baf2SMin Hu (Connor) 
2168f487715fSReshma Pattan 	/* convert to number of cycles */
2169594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2170af75078fSIntel 
2171af75078fSIntel 	for (j = 0; j < 2; j++) {
21727741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2173af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
21747741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2175f487715fSReshma Pattan 				/**
2176f487715fSReshma Pattan 				 * testpmd can get stuck in the do-while loop below
2177f487715fSReshma Pattan 				 * if rte_eth_rx_burst() always returns nonzero
2178f487715fSReshma Pattan 				 * packets, so a timer is used to exit the loop
2179f487715fSReshma Pattan 				 * after a 1 second timeout.
2180f487715fSReshma Pattan 				 */
2181f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2182af75078fSIntel 				do {
21837741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2184013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2185af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2186af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2187f487715fSReshma Pattan 
2188f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2189f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2190f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2191f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2192f487715fSReshma Pattan 					(timer_tsc < timer_period));
2193f487715fSReshma Pattan 				timer_tsc = 0;
2194af75078fSIntel 			}
2195af75078fSIntel 		}
2196af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2197af75078fSIntel 	}
2198af75078fSIntel }
2199af75078fSIntel 
2200af75078fSIntel static void
2201af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2202af75078fSIntel {
2203af75078fSIntel 	struct fwd_stream **fsm;
2204af75078fSIntel 	streamid_t nb_fs;
2205af75078fSIntel 	streamid_t sm_id;
2206a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
22077e4441c8SRemy Horton 	uint64_t tics_per_1sec;
22087e4441c8SRemy Horton 	uint64_t tics_datum;
22097e4441c8SRemy Horton 	uint64_t tics_current;
22104918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2211af75078fSIntel 
22124918a357SXiaoyun Li 	cnt_ports = nb_ports;
22137e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
22147e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22157e4441c8SRemy Horton #endif
2216af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2217af75078fSIntel 	nb_fs = fc->stream_nb;
2218af75078fSIntel 	do {
2219af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2220af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
2221a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2222e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2223e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22247e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22257e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22267e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22274918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2228e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22294918a357SXiaoyun Li 						ports_ids[i]);
22307e4441c8SRemy Horton 				tics_datum = tics_current;
22317e4441c8SRemy Horton 			}
2232e25e6c70SRemy Horton 		}
22337e4441c8SRemy Horton #endif
2234a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
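	/* As with bitrate stats, only the designated lcore updates latency stats. */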
223565eb1e54SPablo de Lara if (latencystats_enabled != 0 && 223665eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 223762d3216dSReshma Pattan rte_latencystats_update(); 223862d3216dSReshma Pattan #endif 223962d3216dSReshma Pattan 2240af75078fSIntel } while (! fc->stopped); 2241af75078fSIntel } 2242af75078fSIntel 2243af75078fSIntel static int 2244af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2245af75078fSIntel { 2246af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2247af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2248af75078fSIntel return 0; 2249af75078fSIntel } 2250af75078fSIntel 2251af75078fSIntel /* 2252af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2253af75078fSIntel * Used to start communication flows in network loopback test configurations. 2254af75078fSIntel */ 2255af75078fSIntel static int 2256af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2257af75078fSIntel { 2258af75078fSIntel struct fwd_lcore *fwd_lc; 2259af75078fSIntel struct fwd_lcore tmp_lcore; 2260af75078fSIntel 2261af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2262af75078fSIntel tmp_lcore = *fwd_lc; 2263af75078fSIntel tmp_lcore.stopped = 1; 2264af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2265af75078fSIntel return 0; 2266af75078fSIntel } 2267af75078fSIntel 2268af75078fSIntel /* 2269af75078fSIntel * Launch packet forwarding: 2270af75078fSIntel * - Setup per-port forwarding context. 2271af75078fSIntel * - launch logical cores with their forwarding configuration. 2272af75078fSIntel */ 2273af75078fSIntel static void 2274af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2275af75078fSIntel { 2276af75078fSIntel unsigned int i; 2277af75078fSIntel unsigned int lc_id; 2278af75078fSIntel int diag; 2279af75078fSIntel 2280af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2281af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2282af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2283af75078fSIntel fwd_lcores[i]->stopped = 0; 2284af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2285af75078fSIntel fwd_lcores[i], lc_id); 2286af75078fSIntel if (diag != 0) 228761a3b0e5SAndrew Rybchenko fprintf(stderr, 228861a3b0e5SAndrew Rybchenko "launch lcore %u failed - diag=%d\n", 2289af75078fSIntel lc_id, diag); 2290af75078fSIntel } 2291af75078fSIntel } 2292af75078fSIntel } 2293af75078fSIntel 2294af75078fSIntel /* 2295af75078fSIntel * Launch packet forwarding configuration. 
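 * Order of operations: validate queue counts for the selected engine,
 * check that all ports are started, (re)build the forwarding config,
 * run the engine's per-port begin callbacks, optionally flush stale
 * Rx packets, reset statistics, then launch one forwarding loop per
 * forwarding lcore.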
2296af75078fSIntel */ 2297af75078fSIntel void 2298af75078fSIntel start_packet_forwarding(int with_tx_first) 2299af75078fSIntel { 2300af75078fSIntel port_fwd_begin_t port_fwd_begin; 2301af75078fSIntel port_fwd_end_t port_fwd_end; 2302af75078fSIntel unsigned int i; 2303af75078fSIntel 23045a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 23055a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 23065a8fb55cSReshma Pattan 23075a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 23085a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 23095a8fb55cSReshma Pattan 23105a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 23115a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 23125a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 23135a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 23145a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 23155a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 23165a8fb55cSReshma Pattan 2317ce8d5614SIntel if (all_ports_started() == 0) { 231861a3b0e5SAndrew Rybchenko fprintf(stderr, "Not all ports were started\n"); 2319ce8d5614SIntel return; 2320ce8d5614SIntel } 2321af75078fSIntel if (test_done == 0) { 232261a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding already started\n"); 2323af75078fSIntel return; 2324af75078fSIntel } 23257741e4cfSIntel 232647a767b2SMatan Azrad fwd_config_setup(); 232747a767b2SMatan Azrad 232865744833SXueming Li pkt_fwd_config_display(&cur_fwd_config); 232965744833SXueming Li if (!pkt_fwd_shared_rxq_check()) 233065744833SXueming Li return; 233165744833SXueming Li 2332a78040c9SAlvin Zhang port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2333a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2334a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2335a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2336a78040c9SAlvin Zhang fprintf(stderr, 2337a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2338a78040c9SAlvin Zhang return; 2339a78040c9SAlvin Zhang } 2340a78040c9SAlvin Zhang } 2341a78040c9SAlvin Zhang } 2342a78040c9SAlvin Zhang 2343a78040c9SAlvin Zhang if (with_tx_first) { 2344a78040c9SAlvin Zhang port_fwd_begin = tx_only_engine.port_fwd_begin; 2345a78040c9SAlvin Zhang if (port_fwd_begin != NULL) { 2346a78040c9SAlvin Zhang for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2347a78040c9SAlvin Zhang if (port_fwd_begin(fwd_ports_ids[i])) { 2348a78040c9SAlvin Zhang fprintf(stderr, 2349a78040c9SAlvin Zhang "Packet forwarding is not ready\n"); 2350a78040c9SAlvin Zhang return; 2351a78040c9SAlvin Zhang } 2352a78040c9SAlvin Zhang } 2353a78040c9SAlvin Zhang } 2354a78040c9SAlvin Zhang } 2355a78040c9SAlvin Zhang 2356a78040c9SAlvin Zhang test_done = 0; 2357a78040c9SAlvin Zhang 23587741e4cfSIntel if(!no_flush_rx) 23597741e4cfSIntel flush_fwd_rx_queues(); 23607741e4cfSIntel 2361af75078fSIntel rxtx_config_display(); 2362af75078fSIntel 236353324971SDavid Marchand fwd_stats_reset(); 2364af75078fSIntel if (with_tx_first) { 2365acbf77a6SZhihong Wang while (with_tx_first--) { 2366acbf77a6SZhihong Wang launch_packet_forwarding( 2367acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2368af75078fSIntel rte_eal_mp_wait_lcore(); 2369acbf77a6SZhihong Wang } 2370af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2371af75078fSIntel if (port_fwd_end != NULL) { 2372af75078fSIntel for (i = 0; i < 
cur_fwd_config.nb_fwd_ports; i++) 2373af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2374af75078fSIntel } 2375af75078fSIntel } 2376af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2377af75078fSIntel } 2378af75078fSIntel 2379af75078fSIntel void 2380af75078fSIntel stop_packet_forwarding(void) 2381af75078fSIntel { 2382af75078fSIntel port_fwd_end_t port_fwd_end; 2383af75078fSIntel lcoreid_t lc_id; 238453324971SDavid Marchand portid_t pt_id; 238553324971SDavid Marchand int i; 2386af75078fSIntel 2387af75078fSIntel if (test_done) { 238861a3b0e5SAndrew Rybchenko fprintf(stderr, "Packet forwarding not started\n"); 2389af75078fSIntel return; 2390af75078fSIntel } 2391af75078fSIntel printf("Telling cores to stop..."); 2392af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2393af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2394af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2395af75078fSIntel rte_eal_mp_wait_lcore(); 2396af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2397af75078fSIntel if (port_fwd_end != NULL) { 2398af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2399af75078fSIntel pt_id = fwd_ports_ids[i]; 2400af75078fSIntel (*port_fwd_end)(pt_id); 2401af75078fSIntel } 2402af75078fSIntel } 2403c185d42cSDavid Marchand 240453324971SDavid Marchand fwd_stats_display(); 240558d475b7SJerin Jacob 2406af75078fSIntel printf("\nDone.\n"); 2407af75078fSIntel test_done = 1; 2408af75078fSIntel } 2409af75078fSIntel 2410cfae07fdSOuyang Changchun void 2411cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 2412cfae07fdSOuyang Changchun { 2413492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 241461a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link up fail.\n"); 2415cfae07fdSOuyang Changchun } 2416cfae07fdSOuyang Changchun 2417cfae07fdSOuyang Changchun void 2418cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 2419cfae07fdSOuyang Changchun { 2420492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 242161a3b0e5SAndrew Rybchenko fprintf(stderr, "\nSet link down fail.\n"); 2422cfae07fdSOuyang Changchun } 2423cfae07fdSOuyang Changchun 2424ce8d5614SIntel static int 2425ce8d5614SIntel all_ports_started(void) 2426ce8d5614SIntel { 2427ce8d5614SIntel portid_t pi; 2428ce8d5614SIntel struct rte_port *port; 2429ce8d5614SIntel 24307d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2431ce8d5614SIntel port = &ports[pi]; 2432ce8d5614SIntel /* Check if there is a port which is not started */ 243341b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 243441b05095SBernard Iremonger (port->slave_flag == 0)) 2435ce8d5614SIntel return 0; 2436ce8d5614SIntel } 2437ce8d5614SIntel 2438ce8d5614SIntel /* No port is not started */ 2439ce8d5614SIntel return 1; 2440ce8d5614SIntel } 2441ce8d5614SIntel 2442148f963fSBruce Richardson int 24436018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 24446018eb8cSShahaf Shuler { 24456018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 24466018eb8cSShahaf Shuler 24476018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 24486018eb8cSShahaf Shuler (port->slave_flag == 0)) 24496018eb8cSShahaf Shuler return 0; 24506018eb8cSShahaf Shuler return 1; 24516018eb8cSShahaf Shuler } 24526018eb8cSShahaf Shuler 24536018eb8cSShahaf Shuler int 2454edab33b1STetsuya Mukawa all_ports_stopped(void) 2455edab33b1STetsuya Mukawa { 2456edab33b1STetsuya Mukawa portid_t pi; 2457edab33b1STetsuya Mukawa 24587d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 
24596018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 2460edab33b1STetsuya Mukawa return 0; 2461edab33b1STetsuya Mukawa } 2462edab33b1STetsuya Mukawa 2463edab33b1STetsuya Mukawa return 1; 2464edab33b1STetsuya Mukawa } 2465edab33b1STetsuya Mukawa 2466edab33b1STetsuya Mukawa int 2467edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2468edab33b1STetsuya Mukawa { 2469edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2470edab33b1STetsuya Mukawa return 0; 2471edab33b1STetsuya Mukawa 2472edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2473edab33b1STetsuya Mukawa return 0; 2474edab33b1STetsuya Mukawa 2475edab33b1STetsuya Mukawa return 1; 2476edab33b1STetsuya Mukawa } 2477edab33b1STetsuya Mukawa 24781c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */ 24791c69df45SOri Kam static int 248001817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 24811c69df45SOri Kam { 24821c69df45SOri Kam queueid_t qi; 24831c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 24841c69df45SOri Kam .peer_count = 1, 24851c69df45SOri Kam }; 24861c69df45SOri Kam int i; 24871c69df45SOri Kam int diag; 24881c69df45SOri Kam struct rte_port *port = &ports[pi]; 248901817b10SBing Zhao uint16_t peer_rx_port = pi; 249001817b10SBing Zhao uint16_t peer_tx_port = pi; 249101817b10SBing Zhao uint32_t manual = 1; 249201817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 249301817b10SBing Zhao 249401817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 249501817b10SBing Zhao peer_rx_port = pi; 249601817b10SBing Zhao peer_tx_port = pi; 249701817b10SBing Zhao manual = 0; 249801817b10SBing Zhao } else if (hairpin_mode & 0x1) { 249901817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 250001817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 250101817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 250201817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 250301817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 250401817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 250501817b10SBing Zhao peer_rx_port = p_pi; 250601817b10SBing Zhao } else { 250701817b10SBing Zhao uint16_t next_pi; 250801817b10SBing Zhao 250901817b10SBing Zhao /* Last port will be the peer RX port of the first. 
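			 * Walking RTE_ETH_FOREACH_DEV to its end leaves
			 * next_pi, and thus peer_rx_port, at the highest
			 * valid port id, closing the hairpin chain back to
			 * the first port.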
*/ 251001817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi) 251101817b10SBing Zhao peer_rx_port = next_pi; 251201817b10SBing Zhao } 251301817b10SBing Zhao manual = 1; 251401817b10SBing Zhao } else if (hairpin_mode & 0x2) { 251501817b10SBing Zhao if (cnt_pi & 0x1) { 251601817b10SBing Zhao peer_rx_port = p_pi; 251701817b10SBing Zhao } else { 251801817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1, 251901817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 252001817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS) 252101817b10SBing Zhao peer_rx_port = pi; 252201817b10SBing Zhao } 252301817b10SBing Zhao peer_tx_port = peer_rx_port; 252401817b10SBing Zhao manual = 1; 252501817b10SBing Zhao } 25261c69df45SOri Kam 25271c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { 252801817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port; 25291c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq; 253001817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 253101817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 25321c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup 25331c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf); 25341c69df45SOri Kam i++; 25351c69df45SOri Kam if (diag == 0) 25361c69df45SOri Kam continue; 25371c69df45SOri Kam 25381c69df45SOri Kam /* Fail to setup rx queue, return */ 2539eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2540eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2541eac341d3SJoyce Kong else 254261a3b0e5SAndrew Rybchenko fprintf(stderr, 254361a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 254461a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 254561a3b0e5SAndrew Rybchenko pi); 25461c69df45SOri Kam /* try to reconfigure queues next time */ 25471c69df45SOri Kam port->need_reconfig_queues = 1; 25481c69df45SOri Kam return -1; 25491c69df45SOri Kam } 25501c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { 255101817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port; 25521c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq; 255301817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 255401817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 25551c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup 25561c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf); 25571c69df45SOri Kam i++; 25581c69df45SOri Kam if (diag == 0) 25591c69df45SOri Kam continue; 25601c69df45SOri Kam 25611c69df45SOri Kam /* Fail to setup rx queue, return */ 2562eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2563eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2564eac341d3SJoyce Kong else 256561a3b0e5SAndrew Rybchenko fprintf(stderr, 256661a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 256761a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 256861a3b0e5SAndrew Rybchenko pi); 25691c69df45SOri Kam /* try to reconfigure queues next time */ 25701c69df45SOri Kam port->need_reconfig_queues = 1; 25711c69df45SOri Kam return -1; 25721c69df45SOri Kam } 25731c69df45SOri Kam return 0; 25741c69df45SOri Kam } 25751c69df45SOri Kam 25762befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. 
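 *
 * When rx_pkt_nb_segs > 1 and RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in
 * rx_conf->offloads, each packet is split across rx_pkt_nb_segs mbuf
 * segments whose lengths, offsets and mempools come from the
 * rx_pkt_seg_lengths[], rx_pkt_seg_offsets[] and per-socket mbuf pool
 * tables. A minimal caller-side sketch (hypothetical queue 0 setup,
 * assuming several pool sizes were configured via --mbuf-size):
 *
 *	struct rte_eth_rxconf rxconf = port->rx_conf[0];
 *
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rx_queue_setup(pid, 0, nb_rxd, port->socket_id, &rxconf, mp);
 *
 * Without those conditions the call degrades to a plain
 * rte_eth_rx_queue_setup().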
*/
25772befc67fSViacheslav Ovsiienko int
25782befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
25792befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
25802befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
25812befc67fSViacheslav Ovsiienko {
25822befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
25832befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
25842befc67fSViacheslav Ovsiienko 	int ret;
25852befc67fSViacheslav Ovsiienko 
25862befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
25872befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
25882befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
25892befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
25902befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
25912befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
25922befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
25932befc67fSViacheslav Ovsiienko 		return ret;
25942befc67fSViacheslav Ovsiienko 	}
25952befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
25962befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
25972befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
25982befc67fSViacheslav Ovsiienko 		/*
25992befc67fSViacheslav Ovsiienko 		 * Use the last valid pool for segments whose index
26002befc67fSViacheslav Ovsiienko 		 * exceeds the number of configured mbuf pools.
26012befc67fSViacheslav Ovsiienko 		 */
26022befc67fSViacheslav Ovsiienko 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
26032befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
26042befc67fSViacheslav Ovsiienko 		/* Handle zero as mbuf data buffer size. */
26052befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
26062befc67fSViacheslav Ovsiienko 				 rx_pkt_seg_lengths[i] :
26072befc67fSViacheslav Ovsiienko 				 mbuf_data_size[mp_n];
26082befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
26092befc67fSViacheslav Ovsiienko 				 rx_pkt_seg_offsets[i] : 0;
26102befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ?
mpx : mp; 26112befc67fSViacheslav Ovsiienko } 26122befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 26132befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 26142befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 26152befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 26162befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 26172befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26182befc67fSViacheslav Ovsiienko return ret; 26192befc67fSViacheslav Ovsiienko } 26202befc67fSViacheslav Ovsiienko 262163b72657SIvan Ilchenko static int 262263b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 262363b72657SIvan Ilchenko { 262463b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 262563b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 262663b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 262763b72657SIvan Ilchenko 262863b72657SIvan Ilchenko if (xstats_display_num == 0) 262963b72657SIvan Ilchenko return 0; 263063b72657SIvan Ilchenko 263163b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 263263b72657SIvan Ilchenko if (*ids_supp == NULL) 263363b72657SIvan Ilchenko goto fail_ids_supp; 263463b72657SIvan Ilchenko 263563b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 263663b72657SIvan Ilchenko sizeof(**prev_values)); 263763b72657SIvan Ilchenko if (*prev_values == NULL) 263863b72657SIvan Ilchenko goto fail_prev_values; 263963b72657SIvan Ilchenko 264063b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 264163b72657SIvan Ilchenko sizeof(**curr_values)); 264263b72657SIvan Ilchenko if (*curr_values == NULL) 264363b72657SIvan Ilchenko goto fail_curr_values; 264463b72657SIvan Ilchenko 264563b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 264663b72657SIvan Ilchenko 264763b72657SIvan Ilchenko return 0; 264863b72657SIvan Ilchenko 264963b72657SIvan Ilchenko fail_curr_values: 265063b72657SIvan Ilchenko free(*prev_values); 265163b72657SIvan Ilchenko fail_prev_values: 265263b72657SIvan Ilchenko free(*ids_supp); 265363b72657SIvan Ilchenko fail_ids_supp: 265463b72657SIvan Ilchenko return -ENOMEM; 265563b72657SIvan Ilchenko } 265663b72657SIvan Ilchenko 265763b72657SIvan Ilchenko static void 265863b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 265963b72657SIvan Ilchenko { 266063b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 266163b72657SIvan Ilchenko return; 266263b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 266363b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 266463b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 266563b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 266663b72657SIvan Ilchenko } 266763b72657SIvan Ilchenko 266863b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
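 *
 * Each name in the xstats_display[] list is resolved to a driver xstat
 * id via rte_eth_xstats_get_id_by_name(); names the driver does not
 * expose are reported and skipped rather than treated as fatal.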
*/ 266963b72657SIvan Ilchenko static void 267063b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi) 267163b72657SIvan Ilchenko { 267263b72657SIvan Ilchenko unsigned int stat, stat_supp; 267363b72657SIvan Ilchenko const char *xstat_name; 267463b72657SIvan Ilchenko struct rte_port *port; 267563b72657SIvan Ilchenko uint64_t *ids_supp; 267663b72657SIvan Ilchenko int rc; 267763b72657SIvan Ilchenko 267863b72657SIvan Ilchenko if (xstats_display_num == 0) 267963b72657SIvan Ilchenko return; 268063b72657SIvan Ilchenko 268163b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) { 268263b72657SIvan Ilchenko fill_xstats_display_info(); 268363b72657SIvan Ilchenko return; 268463b72657SIvan Ilchenko } 268563b72657SIvan Ilchenko 268663b72657SIvan Ilchenko port = &ports[pi]; 268763b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED) 268863b72657SIvan Ilchenko return; 268963b72657SIvan Ilchenko 269063b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) 269163b72657SIvan Ilchenko rte_exit(EXIT_FAILURE, 269263b72657SIvan Ilchenko "Failed to allocate xstats display memory\n"); 269363b72657SIvan Ilchenko 269463b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp; 269563b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { 269663b72657SIvan Ilchenko xstat_name = xstats_display[stat].name; 269763b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, 269863b72657SIvan Ilchenko ids_supp + stat_supp); 269963b72657SIvan Ilchenko if (rc != 0) { 270063b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", 270163b72657SIvan Ilchenko xstat_name, pi, stat); 270263b72657SIvan Ilchenko continue; 270363b72657SIvan Ilchenko } 270463b72657SIvan Ilchenko stat_supp++; 270563b72657SIvan Ilchenko } 270663b72657SIvan Ilchenko 270763b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp; 270863b72657SIvan Ilchenko } 270963b72657SIvan Ilchenko 271063b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. 
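 *
 * Applies fill_xstats_display_info_for_port() to every port known to
 * ethdev; the per-port helper itself skips ports that are not started.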
*/ 271163b72657SIvan Ilchenko static void 271263b72657SIvan Ilchenko fill_xstats_display_info(void) 271363b72657SIvan Ilchenko { 271463b72657SIvan Ilchenko portid_t pi; 271563b72657SIvan Ilchenko 271663b72657SIvan Ilchenko if (xstats_display_num == 0) 271763b72657SIvan Ilchenko return; 271863b72657SIvan Ilchenko 271963b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 272063b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 272163b72657SIvan Ilchenko } 272263b72657SIvan Ilchenko 2723edab33b1STetsuya Mukawa int 2724ce8d5614SIntel start_port(portid_t pid) 2725ce8d5614SIntel { 272692d2703eSMichael Qiu int diag, need_check_link_status = -1; 2727ce8d5614SIntel portid_t pi; 272801817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 272901817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 273001817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 273101817b10SBing Zhao uint16_t cnt_pi = 0; 273201817b10SBing Zhao uint16_t cfg_pi = 0; 273301817b10SBing Zhao int peer_pi; 2734ce8d5614SIntel queueid_t qi; 2735ce8d5614SIntel struct rte_port *port; 27361c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2737ce8d5614SIntel 27384468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 27394468635fSMichael Qiu return 0; 27404468635fSMichael Qiu 27417d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2742edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2743ce8d5614SIntel continue; 2744ce8d5614SIntel 274592d2703eSMichael Qiu need_check_link_status = 0; 2746ce8d5614SIntel port = &ports[pi]; 2747eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STOPPED) 2748eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 2749eac341d3SJoyce Kong else { 275061a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2751ce8d5614SIntel continue; 2752ce8d5614SIntel } 2753ce8d5614SIntel 2754ce8d5614SIntel if (port->need_reconfig > 0) { 2755655eae01SJie Wang struct rte_eth_conf dev_conf; 2756655eae01SJie Wang int k; 2757655eae01SJie Wang 2758ce8d5614SIntel port->need_reconfig = 0; 2759ce8d5614SIntel 27607ee3e944SVasily Philipov if (flow_isolate_all) { 27617ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 27627ee3e944SVasily Philipov if (ret) { 276361a3b0e5SAndrew Rybchenko fprintf(stderr, 276461a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 276561a3b0e5SAndrew Rybchenko pi); 27667ee3e944SVasily Philipov return -1; 27677ee3e944SVasily Philipov } 27687ee3e944SVasily Philipov } 2769b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 27705706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 277120a0286fSLiu Xiaofeng port->socket_id); 27721c69df45SOri Kam if (nb_hairpinq > 0 && 27731c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 277461a3b0e5SAndrew Rybchenko fprintf(stderr, 277561a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 277661a3b0e5SAndrew Rybchenko pi); 27771c69df45SOri Kam return -1; 27781c69df45SOri Kam } 27791bb4a528SFerruh Yigit 2780ce8d5614SIntel /* configure port */ 2781a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 27821c69df45SOri Kam nb_txq + nb_hairpinq, 2783ce8d5614SIntel &(port->dev_conf)); 2784ce8d5614SIntel if (diag != 0) { 2785eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2786eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2787eac341d3SJoyce Kong else 278861a3b0e5SAndrew Rybchenko fprintf(stderr, 278961a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 279061a3b0e5SAndrew Rybchenko pi); 
279161a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d\n", 279261a3b0e5SAndrew Rybchenko pi); 2793ce8d5614SIntel /* try to reconfigure port next time */ 2794ce8d5614SIntel port->need_reconfig = 1; 2795148f963fSBruce Richardson return -1; 2796ce8d5614SIntel } 2797655eae01SJie Wang /* get device configuration*/ 2798655eae01SJie Wang if (0 != 2799655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 2800655eae01SJie Wang fprintf(stderr, 2801655eae01SJie Wang "port %d can not get device configuration\n", 2802655eae01SJie Wang pi); 2803655eae01SJie Wang return -1; 2804655eae01SJie Wang } 2805655eae01SJie Wang /* Apply Rx offloads configuration */ 2806655eae01SJie Wang if (dev_conf.rxmode.offloads != 2807655eae01SJie Wang port->dev_conf.rxmode.offloads) { 2808655eae01SJie Wang port->dev_conf.rxmode.offloads |= 2809655eae01SJie Wang dev_conf.rxmode.offloads; 2810655eae01SJie Wang for (k = 0; 2811655eae01SJie Wang k < port->dev_info.max_rx_queues; 2812655eae01SJie Wang k++) 2813655eae01SJie Wang port->rx_conf[k].offloads |= 2814655eae01SJie Wang dev_conf.rxmode.offloads; 2815655eae01SJie Wang } 2816655eae01SJie Wang /* Apply Tx offloads configuration */ 2817655eae01SJie Wang if (dev_conf.txmode.offloads != 2818655eae01SJie Wang port->dev_conf.txmode.offloads) { 2819655eae01SJie Wang port->dev_conf.txmode.offloads |= 2820655eae01SJie Wang dev_conf.txmode.offloads; 2821655eae01SJie Wang for (k = 0; 2822655eae01SJie Wang k < port->dev_info.max_tx_queues; 2823655eae01SJie Wang k++) 2824655eae01SJie Wang port->tx_conf[k].offloads |= 2825655eae01SJie Wang dev_conf.txmode.offloads; 2826655eae01SJie Wang } 2827ce8d5614SIntel } 2828a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 2829ce8d5614SIntel port->need_reconfig_queues = 0; 2830ce8d5614SIntel /* setup tx queues */ 2831ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 2832b6ea6408SIntel if ((numa_support) && 2833b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2834b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2835d44f8a48SQi Zhang port->nb_tx_desc[qi], 2836d44f8a48SQi Zhang txring_numa[pi], 2837d44f8a48SQi Zhang &(port->tx_conf[qi])); 2838b6ea6408SIntel else 2839b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2840d44f8a48SQi Zhang port->nb_tx_desc[qi], 2841d44f8a48SQi Zhang port->socket_id, 2842d44f8a48SQi Zhang &(port->tx_conf[qi])); 2843b6ea6408SIntel 2844ce8d5614SIntel if (diag == 0) 2845ce8d5614SIntel continue; 2846ce8d5614SIntel 2847ce8d5614SIntel /* Fail to setup tx queue, return */ 2848eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2849eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2850eac341d3SJoyce Kong else 285161a3b0e5SAndrew Rybchenko fprintf(stderr, 285261a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 285361a3b0e5SAndrew Rybchenko pi); 285461a3b0e5SAndrew Rybchenko fprintf(stderr, 285561a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 2856d44f8a48SQi Zhang pi); 2857ce8d5614SIntel /* try to reconfigure queues next time */ 2858ce8d5614SIntel port->need_reconfig_queues = 1; 2859148f963fSBruce Richardson return -1; 2860ce8d5614SIntel } 2861ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2862d44f8a48SQi Zhang /* setup rx queues */ 2863b6ea6408SIntel if ((numa_support) && 2864b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2865b6ea6408SIntel struct rte_mempool * mp = 286626cbb419SViacheslav Ovsiienko mbuf_pool_find 286726cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2868b6ea6408SIntel if (mp == 
NULL) { 286961a3b0e5SAndrew Rybchenko fprintf(stderr, 287061a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 2871b6ea6408SIntel rxring_numa[pi]); 2872148f963fSBruce Richardson return -1; 2873b6ea6408SIntel } 2874b6ea6408SIntel 28752befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2876d4930794SFerruh Yigit port->nb_rx_desc[qi], 2877d44f8a48SQi Zhang rxring_numa[pi], 2878d44f8a48SQi Zhang &(port->rx_conf[qi]), 2879d44f8a48SQi Zhang mp); 28801e1d6bddSBernard Iremonger } else { 28811e1d6bddSBernard Iremonger struct rte_mempool *mp = 288226cbb419SViacheslav Ovsiienko mbuf_pool_find 288326cbb419SViacheslav Ovsiienko (port->socket_id, 0); 28841e1d6bddSBernard Iremonger if (mp == NULL) { 288561a3b0e5SAndrew Rybchenko fprintf(stderr, 288661a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 28871e1d6bddSBernard Iremonger port->socket_id); 28881e1d6bddSBernard Iremonger return -1; 2889b6ea6408SIntel } 28902befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2891d4930794SFerruh Yigit port->nb_rx_desc[qi], 2892d44f8a48SQi Zhang port->socket_id, 2893d44f8a48SQi Zhang &(port->rx_conf[qi]), 2894d44f8a48SQi Zhang mp); 28951e1d6bddSBernard Iremonger } 2896ce8d5614SIntel if (diag == 0) 2897ce8d5614SIntel continue; 2898ce8d5614SIntel 2899ce8d5614SIntel /* Fail to setup rx queue, return */ 2900eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2901eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2902eac341d3SJoyce Kong else 290361a3b0e5SAndrew Rybchenko fprintf(stderr, 290461a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 290561a3b0e5SAndrew Rybchenko pi); 290661a3b0e5SAndrew Rybchenko fprintf(stderr, 290761a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 2908d44f8a48SQi Zhang pi); 2909ce8d5614SIntel /* try to reconfigure queues next time */ 2910ce8d5614SIntel port->need_reconfig_queues = 1; 2911148f963fSBruce Richardson return -1; 2912ce8d5614SIntel } 29131c69df45SOri Kam /* setup hairpin queues */ 291401817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 29151c69df45SOri Kam return -1; 2916ce8d5614SIntel } 2917b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 2918b0a9354aSPavan Nikhilesh if (clear_ptypes) { 2919b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 2920b0a9354aSPavan Nikhilesh NULL, 0); 2921b0a9354aSPavan Nikhilesh if (diag < 0) 292261a3b0e5SAndrew Rybchenko fprintf(stderr, 2923b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 2924b0a9354aSPavan Nikhilesh pi); 2925b0a9354aSPavan Nikhilesh } 2926b0a9354aSPavan Nikhilesh 292701817b10SBing Zhao p_pi = pi; 292801817b10SBing Zhao cnt_pi++; 292901817b10SBing Zhao 2930ce8d5614SIntel /* start port */ 2931a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi); 293252f2c6f2SAndrew Rybchenko if (diag < 0) { 293361a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to start port %d: %s\n", 293461a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag)); 2935ce8d5614SIntel 2936ce8d5614SIntel /* Fail to setup rx queue, return */ 2937eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 2938eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 2939eac341d3SJoyce Kong else 294061a3b0e5SAndrew Rybchenko fprintf(stderr, 294161a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 294261a3b0e5SAndrew Rybchenko pi); 2943ce8d5614SIntel continue; 2944ce8d5614SIntel } 2945ce8d5614SIntel 2946eac341d3SJoyce Kong if 
(port->port_status == RTE_PORT_HANDLING) 2947eac341d3SJoyce Kong port->port_status = RTE_PORT_STARTED; 2948eac341d3SJoyce Kong else 294961a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into started\n", 295061a3b0e5SAndrew Rybchenko pi); 2951ce8d5614SIntel 29525ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0) 2953c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, 2954a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr)); 2955d8c89163SZijie Pan 2956ce8d5614SIntel /* at least one port started, need checking link status */ 2957ce8d5614SIntel need_check_link_status = 1; 295801817b10SBing Zhao 295901817b10SBing Zhao pl[cfg_pi++] = pi; 2960ce8d5614SIntel } 2961ce8d5614SIntel 296292d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 2963edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 296492d2703eSMichael Qiu else if (need_check_link_status == 0) 296561a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n"); 2966ce8d5614SIntel 296701817b10SBing Zhao if (hairpin_mode & 0xf) { 296801817b10SBing Zhao uint16_t i; 296901817b10SBing Zhao int j; 297001817b10SBing Zhao 297101817b10SBing Zhao /* bind all started hairpin ports */ 297201817b10SBing Zhao for (i = 0; i < cfg_pi; i++) { 297301817b10SBing Zhao pi = pl[i]; 297401817b10SBing Zhao /* bind current Tx to all peer Rx */ 297501817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 297601817b10SBing Zhao RTE_MAX_ETHPORTS, 1); 297701817b10SBing Zhao if (peer_pi < 0) 297801817b10SBing Zhao return peer_pi; 297901817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 298001817b10SBing Zhao if (!port_is_started(peer_pl[j])) 298101817b10SBing Zhao continue; 298201817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 298301817b10SBing Zhao if (diag < 0) { 298461a3b0e5SAndrew Rybchenko fprintf(stderr, 298561a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 298601817b10SBing Zhao pi, peer_pl[j], 298701817b10SBing Zhao rte_strerror(-diag)); 298801817b10SBing Zhao return -1; 298901817b10SBing Zhao } 299001817b10SBing Zhao } 299101817b10SBing Zhao /* bind all peer Tx to current Rx */ 299201817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 299301817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 299401817b10SBing Zhao if (peer_pi < 0) 299501817b10SBing Zhao return peer_pi; 299601817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 299701817b10SBing Zhao if (!port_is_started(peer_pl[j])) 299801817b10SBing Zhao continue; 299901817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 300001817b10SBing Zhao if (diag < 0) { 300161a3b0e5SAndrew Rybchenko fprintf(stderr, 300261a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 300301817b10SBing Zhao peer_pl[j], pi, 300401817b10SBing Zhao rte_strerror(-diag)); 300501817b10SBing Zhao return -1; 300601817b10SBing Zhao } 300701817b10SBing Zhao } 300801817b10SBing Zhao } 300901817b10SBing Zhao } 301001817b10SBing Zhao 301163b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 301263b72657SIvan Ilchenko 3013ce8d5614SIntel printf("Done\n"); 3014148f963fSBruce Richardson return 0; 3015ce8d5614SIntel } 3016ce8d5614SIntel 3017ce8d5614SIntel void 3018ce8d5614SIntel stop_port(portid_t pid) 3019ce8d5614SIntel { 3020ce8d5614SIntel portid_t pi; 3021ce8d5614SIntel struct rte_port *port; 3022ce8d5614SIntel int need_check_link_status = 0; 302301817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 302401817b10SBing 
Zhao int peer_pi; 3025ce8d5614SIntel 30264468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 30274468635fSMichael Qiu return; 30284468635fSMichael Qiu 3029ce8d5614SIntel printf("Stopping ports...\n"); 3030ce8d5614SIntel 30317d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 30324468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3033ce8d5614SIntel continue; 3034ce8d5614SIntel 3035a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 303661a3b0e5SAndrew Rybchenko fprintf(stderr, 303761a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 303861a3b0e5SAndrew Rybchenko pi); 3039a8ef3e3aSBernard Iremonger continue; 3040a8ef3e3aSBernard Iremonger } 3041a8ef3e3aSBernard Iremonger 30420e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 304361a3b0e5SAndrew Rybchenko fprintf(stderr, 304461a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 304561a3b0e5SAndrew Rybchenko pi); 30460e545d30SBernard Iremonger continue; 30470e545d30SBernard Iremonger } 30480e545d30SBernard Iremonger 3049ce8d5614SIntel port = &ports[pi]; 3050eac341d3SJoyce Kong if (port->port_status == RTE_PORT_STARTED) 3051eac341d3SJoyce Kong port->port_status = RTE_PORT_HANDLING; 3052eac341d3SJoyce Kong else 3053ce8d5614SIntel continue; 3054ce8d5614SIntel 305501817b10SBing Zhao if (hairpin_mode & 0xf) { 305601817b10SBing Zhao int j; 305701817b10SBing Zhao 305801817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 305901817b10SBing Zhao /* unbind all peer Tx from current Rx */ 306001817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 306101817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 306201817b10SBing Zhao if (peer_pi < 0) 306301817b10SBing Zhao continue; 306401817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 306501817b10SBing Zhao if (!port_is_started(peer_pl[j])) 306601817b10SBing Zhao continue; 306701817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 306801817b10SBing Zhao } 306901817b10SBing Zhao } 307001817b10SBing Zhao 30710f93edbfSGregory Etelson if (port->flow_list) 30720f93edbfSGregory Etelson port_flow_flush(pi); 30730f93edbfSGregory Etelson 3074a550baf2SMin Hu (Connor) if (eth_dev_stop_mp(pi) != 0) 3075e62c5a12SIvan Ilchenko RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3076e62c5a12SIvan Ilchenko pi); 3077ce8d5614SIntel 3078eac341d3SJoyce Kong if (port->port_status == RTE_PORT_HANDLING) 3079eac341d3SJoyce Kong port->port_status = RTE_PORT_STOPPED; 3080eac341d3SJoyce Kong else 308161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 308261a3b0e5SAndrew Rybchenko pi); 3083ce8d5614SIntel need_check_link_status = 1; 3084ce8d5614SIntel } 3085bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3086edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3087ce8d5614SIntel 3088ce8d5614SIntel printf("Done\n"); 3089ce8d5614SIntel } 3090ce8d5614SIntel 3091ce6959bfSWisam Jaddo static void 30924f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3093ce6959bfSWisam Jaddo { 30944f1de450SThomas Monjalon portid_t i; 30954f1de450SThomas Monjalon portid_t new_total = 0; 3096ce6959bfSWisam Jaddo 30974f1de450SThomas Monjalon for (i = 0; i < *total; i++) 30984f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 30994f1de450SThomas Monjalon array[new_total] = array[i]; 31004f1de450SThomas Monjalon new_total++; 3101ce6959bfSWisam Jaddo } 31024f1de450SThomas Monjalon *total = new_total; 
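	/*
	 * The array is compacted in place: entries at index new_total and
	 * beyond are stale and must not be read once *total is updated.
	 */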
31034f1de450SThomas Monjalon } 31044f1de450SThomas Monjalon 31054f1de450SThomas Monjalon static void 31064f1de450SThomas Monjalon remove_invalid_ports(void) 31074f1de450SThomas Monjalon { 31084f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 31094f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 31104f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3111ce6959bfSWisam Jaddo } 3112ce6959bfSWisam Jaddo 3113ce8d5614SIntel void 3114ce8d5614SIntel close_port(portid_t pid) 3115ce8d5614SIntel { 3116ce8d5614SIntel portid_t pi; 3117ce8d5614SIntel struct rte_port *port; 3118ce8d5614SIntel 31194468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 31204468635fSMichael Qiu return; 31214468635fSMichael Qiu 3122ce8d5614SIntel printf("Closing ports...\n"); 3123ce8d5614SIntel 31247d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 31254468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3126ce8d5614SIntel continue; 3127ce8d5614SIntel 3128a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 312961a3b0e5SAndrew Rybchenko fprintf(stderr, 313061a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 313161a3b0e5SAndrew Rybchenko pi); 3132a8ef3e3aSBernard Iremonger continue; 3133a8ef3e3aSBernard Iremonger } 3134a8ef3e3aSBernard Iremonger 31350e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 313661a3b0e5SAndrew Rybchenko fprintf(stderr, 313761a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 313861a3b0e5SAndrew Rybchenko pi); 31390e545d30SBernard Iremonger continue; 31400e545d30SBernard Iremonger } 31410e545d30SBernard Iremonger 3142ce8d5614SIntel port = &ports[pi]; 3143eac341d3SJoyce Kong if (port->port_status == RTE_PORT_CLOSED) { 314461a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3145d4e8ad64SMichael Qiu continue; 3146d4e8ad64SMichael Qiu } 3147d4e8ad64SMichael Qiu 3148a550baf2SMin Hu (Connor) if (is_proc_primary()) { 3149938a184aSAdrien Mazarguil port_flow_flush(pi); 315059f3a8acSGregory Etelson port_flex_item_flush(pi); 3151ce8d5614SIntel rte_eth_dev_close(pi); 3152ce8d5614SIntel } 315363b72657SIvan Ilchenko 315463b72657SIvan Ilchenko free_xstats_display_info(pi); 3155a550baf2SMin Hu (Connor) } 3156ce8d5614SIntel 315785c6571cSThomas Monjalon remove_invalid_ports(); 3158ce8d5614SIntel printf("Done\n"); 3159ce8d5614SIntel } 3160ce8d5614SIntel 3161edab33b1STetsuya Mukawa void 316297f1e196SWei Dai reset_port(portid_t pid) 316397f1e196SWei Dai { 316497f1e196SWei Dai int diag; 316597f1e196SWei Dai portid_t pi; 316697f1e196SWei Dai struct rte_port *port; 316797f1e196SWei Dai 316897f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 316997f1e196SWei Dai return; 317097f1e196SWei Dai 31711cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 31721cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 317361a3b0e5SAndrew Rybchenko fprintf(stderr, 317461a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 31751cde1b9aSShougang Wang return; 31761cde1b9aSShougang Wang } 31771cde1b9aSShougang Wang 317897f1e196SWei Dai printf("Resetting ports...\n"); 317997f1e196SWei Dai 318097f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 318197f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 318297f1e196SWei Dai continue; 318397f1e196SWei Dai 318497f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 318561a3b0e5SAndrew Rybchenko 
fprintf(stderr, 318661a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 318761a3b0e5SAndrew Rybchenko pi); 318897f1e196SWei Dai continue; 318997f1e196SWei Dai } 319097f1e196SWei Dai 319197f1e196SWei Dai if (port_is_bonding_slave(pi)) { 319261a3b0e5SAndrew Rybchenko fprintf(stderr, 319361a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 319497f1e196SWei Dai pi); 319597f1e196SWei Dai continue; 319697f1e196SWei Dai } 319797f1e196SWei Dai 319897f1e196SWei Dai diag = rte_eth_dev_reset(pi); 319997f1e196SWei Dai if (diag == 0) { 320097f1e196SWei Dai port = &ports[pi]; 320197f1e196SWei Dai port->need_reconfig = 1; 320297f1e196SWei Dai port->need_reconfig_queues = 1; 320397f1e196SWei Dai } else { 320461a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. diag=%d\n", 320561a3b0e5SAndrew Rybchenko pi, diag); 320697f1e196SWei Dai } 320797f1e196SWei Dai } 320897f1e196SWei Dai 320997f1e196SWei Dai printf("Done\n"); 321097f1e196SWei Dai } 321197f1e196SWei Dai 321297f1e196SWei Dai void 3213edab33b1STetsuya Mukawa attach_port(char *identifier) 3214ce8d5614SIntel { 32154f1ed78eSThomas Monjalon portid_t pi; 3216c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3217ce8d5614SIntel 3218edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3219edab33b1STetsuya Mukawa 3220edab33b1STetsuya Mukawa if (identifier == NULL) { 322161a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3222edab33b1STetsuya Mukawa return; 3223ce8d5614SIntel } 3224ce8d5614SIntel 322575b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3226c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3227edab33b1STetsuya Mukawa return; 3228c9cce428SThomas Monjalon } 3229c9cce428SThomas Monjalon 32304f1ed78eSThomas Monjalon /* first attach mode: event */ 32314f1ed78eSThomas Monjalon if (setup_on_probe_event) { 32324f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 32334f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 32344f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 32354f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 32364f1ed78eSThomas Monjalon setup_attached_port(pi); 32374f1ed78eSThomas Monjalon return; 32384f1ed78eSThomas Monjalon } 32394f1ed78eSThomas Monjalon 32404f1ed78eSThomas Monjalon /* second attach mode: iterator */ 324186fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 32424f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 324386fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 324486fa5de1SThomas Monjalon continue; /* port was already attached before */ 3245c9cce428SThomas Monjalon setup_attached_port(pi); 3246c9cce428SThomas Monjalon } 324786fa5de1SThomas Monjalon } 3248c9cce428SThomas Monjalon 3249c9cce428SThomas Monjalon static void 3250c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3251c9cce428SThomas Monjalon { 3252c9cce428SThomas Monjalon unsigned int socket_id; 325334fc1051SIvan Ilchenko int ret; 3254edab33b1STetsuya Mukawa 3255931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 325629841336SPhil Yang /* if socket_id is invalid, set to the first available socket. 
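	 * (rte_eth_dev_socket_id() may report an unknown NUMA node,
	 * e.g. for virtual devices; cast to unsigned it shows up here
	 * as an out-of-range id).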
*/ 3257931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 325829841336SPhil Yang socket_id = socket_ids[0]; 3259931126baSBernard Iremonger reconfig(pi, socket_id); 326034fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 326134fc1051SIvan Ilchenko if (ret != 0) 326261a3b0e5SAndrew Rybchenko fprintf(stderr, 326361a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 326434fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3265edab33b1STetsuya Mukawa 32664f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 32674f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 32684f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 32694f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3270edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3271edab33b1STetsuya Mukawa 3272edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 3273edab33b1STetsuya Mukawa printf("Done\n"); 3274edab33b1STetsuya Mukawa } 3275edab33b1STetsuya Mukawa 32760654d4a8SThomas Monjalon static void 32770654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 32785f4ec54fSChen Jing D(Mark) { 3279f8e5baa2SThomas Monjalon portid_t sibling; 3280f8e5baa2SThomas Monjalon 3281f8e5baa2SThomas Monjalon if (dev == NULL) { 328261a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3283f8e5baa2SThomas Monjalon return; 3284f8e5baa2SThomas Monjalon } 3285f8e5baa2SThomas Monjalon 32860654d4a8SThomas Monjalon printf("Removing a device...\n"); 3287938a184aSAdrien Mazarguil 32882a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 32892a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 32902a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 329161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 329261a3b0e5SAndrew Rybchenko sibling); 32932a449871SThomas Monjalon return; 32942a449871SThomas Monjalon } 32952a449871SThomas Monjalon port_flow_flush(sibling); 32962a449871SThomas Monjalon } 32972a449871SThomas Monjalon } 32982a449871SThomas Monjalon 329975b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3300f8e5baa2SThomas Monjalon TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); 3301edab33b1STetsuya Mukawa return; 33023070419eSGaetan Rivet } 33034f1de450SThomas Monjalon remove_invalid_ports(); 330403ce2c53SMatan Azrad 33050654d4a8SThomas Monjalon printf("Device is detached\n"); 3306f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3307edab33b1STetsuya Mukawa printf("Done\n"); 3308edab33b1STetsuya Mukawa return; 33095f4ec54fSChen Jing D(Mark) } 33105f4ec54fSChen Jing D(Mark) 3311af75078fSIntel void 33120654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 33130654d4a8SThomas Monjalon { 33140a0821bcSPaulis Gributs int ret; 33150a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 33160a0821bcSPaulis Gributs 33170654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 33180654d4a8SThomas Monjalon return; 33190654d4a8SThomas Monjalon 33200654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 33210654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 332261a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 33230654d4a8SThomas Monjalon return; 33240654d4a8SThomas Monjalon } 332561a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 33260654d4a8SThomas Monjalon } 33270654d4a8SThomas Monjalon 33280a0821bcSPaulis Gributs 
ret = eth_dev_info_get_print_err(port_id, &dev_info); 33290a0821bcSPaulis Gributs if (ret != 0) { 33300a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 33310a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 33320a0821bcSPaulis Gributs port_id); 33330a0821bcSPaulis Gributs return; 33340a0821bcSPaulis Gributs } 33350a0821bcSPaulis Gributs detach_device(dev_info.device); 33360654d4a8SThomas Monjalon } 33370654d4a8SThomas Monjalon 33380654d4a8SThomas Monjalon void 33395edee5f6SThomas Monjalon detach_devargs(char *identifier) 334055e51c96SNithin Dabilpuram { 334155e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 334255e51c96SNithin Dabilpuram struct rte_devargs da; 334355e51c96SNithin Dabilpuram portid_t port_id; 334455e51c96SNithin Dabilpuram 334555e51c96SNithin Dabilpuram printf("Removing a device...\n"); 334655e51c96SNithin Dabilpuram 334755e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 334855e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 334961a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 335055e51c96SNithin Dabilpuram return; 335155e51c96SNithin Dabilpuram } 335255e51c96SNithin Dabilpuram 335355e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 335455e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 335555e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 335661a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 335761a3b0e5SAndrew Rybchenko port_id); 3358149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 335964051bb1SXueming Li rte_devargs_reset(&da); 336055e51c96SNithin Dabilpuram return; 336155e51c96SNithin Dabilpuram } 336255e51c96SNithin Dabilpuram port_flow_flush(port_id); 336355e51c96SNithin Dabilpuram } 336455e51c96SNithin Dabilpuram } 336555e51c96SNithin Dabilpuram 336655e51c96SNithin Dabilpuram if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { 336755e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 336855e51c96SNithin Dabilpuram da.name, da.bus->name); 336964051bb1SXueming Li rte_devargs_reset(&da); 337055e51c96SNithin Dabilpuram return; 337155e51c96SNithin Dabilpuram } 337255e51c96SNithin Dabilpuram 337355e51c96SNithin Dabilpuram remove_invalid_ports(); 337455e51c96SNithin Dabilpuram 337555e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 337655e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 337755e51c96SNithin Dabilpuram printf("Done\n"); 337864051bb1SXueming Li rte_devargs_reset(&da); 337955e51c96SNithin Dabilpuram } 338055e51c96SNithin Dabilpuram 338155e51c96SNithin Dabilpuram void 3382af75078fSIntel pmd_test_exit(void) 3383af75078fSIntel { 3384af75078fSIntel portid_t pt_id; 338526cbb419SViacheslav Ovsiienko unsigned int i; 3386fb73e096SJeff Guo int ret; 3387af75078fSIntel 33888210ec25SPablo de Lara if (test_done == 0) 33898210ec25SPablo de Lara stop_packet_forwarding(); 33908210ec25SPablo de Lara 3391761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 339226cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 33933a0968c8SShahaf Shuler if (mempools[i]) { 33943a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 33953a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 33963a0968c8SShahaf Shuler NULL); 33973a0968c8SShahaf Shuler } 33983a0968c8SShahaf Shuler } 3399761f7ae1SJie Zhou #endif 3400d3a274ceSZhihong Wang if (ports != NULL) { 3401d3a274ceSZhihong 
Wang no_link_check = 1; 34027d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 340308fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3404af75078fSIntel fflush(stdout); 3405d3a274ceSZhihong Wang stop_port(pt_id); 340608fd782bSCristian Dumitrescu } 340708fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 340808fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 340908fd782bSCristian Dumitrescu fflush(stdout); 3410d3a274ceSZhihong Wang close_port(pt_id); 3411af75078fSIntel } 3412d3a274ceSZhihong Wang } 3413fb73e096SJeff Guo 3414fb73e096SJeff Guo if (hot_plug) { 3415fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 34162049c511SJeff Guo if (ret) { 3417fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3418fb73e096SJeff Guo "fail to stop device event monitor."); 34192049c511SJeff Guo return; 34202049c511SJeff Guo } 3421fb73e096SJeff Guo 34222049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3423cc1bf307SJeff Guo dev_event_callback, NULL); 34242049c511SJeff Guo if (ret < 0) { 3425fb73e096SJeff Guo RTE_LOG(ERR, EAL, 34262049c511SJeff Guo "fail to unregister device event callback.\n"); 34272049c511SJeff Guo return; 34282049c511SJeff Guo } 34292049c511SJeff Guo 34302049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 34312049c511SJeff Guo if (ret) { 34322049c511SJeff Guo RTE_LOG(ERR, EAL, 34332049c511SJeff Guo "fail to disable hotplug handling.\n"); 34342049c511SJeff Guo return; 34352049c511SJeff Guo } 3436fb73e096SJeff Guo } 343726cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3438401b744dSShahaf Shuler if (mempools[i]) 3439a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3440401b744dSShahaf Shuler } 344163b72657SIvan Ilchenko free(xstats_display); 3442fb73e096SJeff Guo 3443d3a274ceSZhihong Wang printf("\nBye...\n"); 3444af75078fSIntel } 3445af75078fSIntel 3446af75078fSIntel typedef void (*cmd_func_t)(void); 3447af75078fSIntel struct pmd_test_command { 3448af75078fSIntel const char *cmd_name; 3449af75078fSIntel cmd_func_t cmd_func; 3450af75078fSIntel }; 3451af75078fSIntel 3452ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3453af75078fSIntel static void 3454edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3455af75078fSIntel { 3456ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3457ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3458f8244c63SZhiyong Yang portid_t portid; 3459f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3460ce8d5614SIntel struct rte_eth_link link; 3461e661a08bSIgor Romanov int ret; 3462ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3463ce8d5614SIntel 3464ce8d5614SIntel printf("Checking link statuses...\n"); 3465ce8d5614SIntel fflush(stdout); 3466ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3467ce8d5614SIntel all_ports_up = 1; 34687d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3469ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3470ce8d5614SIntel continue; 3471ce8d5614SIntel memset(&link, 0, sizeof(link)); 3472e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3473e661a08bSIgor Romanov if (ret < 0) { 3474e661a08bSIgor Romanov all_ports_up = 0; 3475e661a08bSIgor Romanov if (print_flag == 1) 347661a3b0e5SAndrew Rybchenko fprintf(stderr, 347761a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3478e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3479e661a08bSIgor Romanov continue; 3480e661a08bSIgor Romanov } 
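			/*
			 * Query succeeded: on the final pass (print_flag
			 * set) the status is printed per port; earlier
			 * passes only feed the all_ports_up decision.
			 */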
3481ce8d5614SIntel /* print link status if flag set */ 3482ce8d5614SIntel if (print_flag == 1) { 3483ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3484ba5509a6SIvan Dyukov sizeof(link_status), &link); 3485ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3486ce8d5614SIntel continue; 3487ce8d5614SIntel } 3488ce8d5614SIntel /* clear all_ports_up flag if any link down */ 3489295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 3490ce8d5614SIntel all_ports_up = 0; 3491ce8d5614SIntel break; 3492ce8d5614SIntel } 3493ce8d5614SIntel } 3494ce8d5614SIntel /* after finally printing all link status, get out */ 3495ce8d5614SIntel if (print_flag == 1) 3496ce8d5614SIntel break; 3497ce8d5614SIntel 3498ce8d5614SIntel if (all_ports_up == 0) { 3499ce8d5614SIntel fflush(stdout); 3500ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 3501ce8d5614SIntel } 3502ce8d5614SIntel 3503ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3504ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3505ce8d5614SIntel print_flag = 1; 3506ce8d5614SIntel } 35078ea656f8SGaetan Rivet 35088ea656f8SGaetan Rivet if (lsc_interrupt) 35098ea656f8SGaetan Rivet break; 3510ce8d5614SIntel } 3511af75078fSIntel } 3512af75078fSIntel 3513284c908cSGaetan Rivet static void 3514cc1bf307SJeff Guo rmv_port_callback(void *arg) 3515284c908cSGaetan Rivet { 35163b97888aSMatan Azrad int need_to_start = 0; 35170da2a62bSMatan Azrad int org_no_link_check = no_link_check; 351828caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 35190a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 35200a0821bcSPaulis Gributs int ret; 3521284c908cSGaetan Rivet 3522284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3523284c908cSGaetan Rivet 35243b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 35253b97888aSMatan Azrad need_to_start = 1; 35263b97888aSMatan Azrad stop_packet_forwarding(); 35273b97888aSMatan Azrad } 35280da2a62bSMatan Azrad no_link_check = 1; 3529284c908cSGaetan Rivet stop_port(port_id); 35300da2a62bSMatan Azrad no_link_check = org_no_link_check; 35310654d4a8SThomas Monjalon 35320a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 35330a0821bcSPaulis Gributs if (ret != 0) 35340a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 35350a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 35360a0821bcSPaulis Gributs port_id); 3537e1d38504SPaulis Gributs else { 3538e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3539e1d38504SPaulis Gributs close_port(port_id); 3540e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3541e1d38504SPaulis Gributs } 35423b97888aSMatan Azrad if (need_to_start) 35433b97888aSMatan Azrad start_packet_forwarding(0); 3544284c908cSGaetan Rivet } 3545284c908cSGaetan Rivet 354676ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3547d6af1a13SBernard Iremonger static int 3548f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3549d6af1a13SBernard Iremonger void *ret_param) 355076ad4a2dSGaetan Rivet { 355176ad4a2dSGaetan Rivet RTE_SET_USED(param); 3552d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 355376ad4a2dSGaetan Rivet 355476ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 355561a3b0e5SAndrew Rybchenko fprintf(stderr, 355661a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 355776ad4a2dSGaetan Rivet port_id, __func__, type); 
355876ad4a2dSGaetan Rivet fflush(stderr); 35593af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3560f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 356197b5d8b5SThomas Monjalon eth_event_desc[type]); 356276ad4a2dSGaetan Rivet fflush(stdout); 356376ad4a2dSGaetan Rivet } 3564284c908cSGaetan Rivet 3565284c908cSGaetan Rivet switch (type) { 35664f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 35674f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 35684f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 35694f1ed78eSThomas Monjalon break; 3570284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 35714f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 35724f1ed78eSThomas Monjalon break; 3573284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3574cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 357561a3b0e5SAndrew Rybchenko fprintf(stderr, 357661a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3577284c908cSGaetan Rivet break; 357885c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 357985c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 358085c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 358185c6571cSThomas Monjalon break; 3582284c908cSGaetan Rivet default: 3583284c908cSGaetan Rivet break; 3584284c908cSGaetan Rivet } 3585d6af1a13SBernard Iremonger return 0; 358676ad4a2dSGaetan Rivet } 358776ad4a2dSGaetan Rivet 358897b5d8b5SThomas Monjalon static int 358997b5d8b5SThomas Monjalon register_eth_event_callback(void) 359097b5d8b5SThomas Monjalon { 359197b5d8b5SThomas Monjalon int ret; 359297b5d8b5SThomas Monjalon enum rte_eth_event_type event; 359397b5d8b5SThomas Monjalon 359497b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 359597b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 359697b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 359797b5d8b5SThomas Monjalon event, 359897b5d8b5SThomas Monjalon eth_event_callback, 359997b5d8b5SThomas Monjalon NULL); 360097b5d8b5SThomas Monjalon if (ret != 0) { 360197b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 360297b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 360397b5d8b5SThomas Monjalon return -1; 360497b5d8b5SThomas Monjalon } 360597b5d8b5SThomas Monjalon } 360697b5d8b5SThomas Monjalon 360797b5d8b5SThomas Monjalon return 0; 360897b5d8b5SThomas Monjalon } 360997b5d8b5SThomas Monjalon 3610fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3611fb73e096SJeff Guo static void 3612cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3613fb73e096SJeff Guo __rte_unused void *arg) 3614fb73e096SJeff Guo { 36152049c511SJeff Guo uint16_t port_id; 36162049c511SJeff Guo int ret; 36172049c511SJeff Guo 3618fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3619fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3620fb73e096SJeff Guo __func__, type); 3621fb73e096SJeff Guo fflush(stderr); 3622fb73e096SJeff Guo } 3623fb73e096SJeff Guo 3624fb73e096SJeff Guo switch (type) { 3625fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3626cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3627fb73e096SJeff Guo device_name); 36282049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 36292049c511SJeff Guo if (ret) { 36302049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 36312049c511SJeff Guo 
device_name);
36322049c511SJeff Guo 			return;
36332049c511SJeff Guo 		}
3634cc1bf307SJeff Guo 		/*
3635cc1bf307SJeff Guo 		 * Because the user's callback is invoked in the EAL
3636cc1bf307SJeff Guo 		 * interrupt callback, the interrupt callback must finish
3637cc1bf307SJeff Guo 		 * before it can be unregistered when detaching the device.
3638cc1bf307SJeff Guo 		 * So finish the callback quickly and detach the device
3639cc1bf307SJeff Guo 		 * through a deferred removal. This is a workaround: once
3640cc1bf307SJeff Guo 		 * device detaching is moved into the EAL in the future,
3641cc1bf307SJeff Guo 		 * the deferred removal can be deleted.
3642cc1bf307SJeff Guo 		 */
3643cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3644cc1bf307SJeff Guo 			rmv_port_callback, (void *)(intptr_t)port_id))
3645cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3646cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3647fb73e096SJeff Guo 		break;
3648fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3649fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3650fb73e096SJeff Guo 			device_name);
3651fb73e096SJeff Guo 		/* TODO: after kernel driver binding is finished,
3652fb73e096SJeff Guo 		 * begin to attach the port.
3653fb73e096SJeff Guo 		 */
3654fb73e096SJeff Guo 		break;
3655fb73e096SJeff Guo 	default:
3656fb73e096SJeff Guo 		break;
3657fb73e096SJeff Guo 	}
3658fb73e096SJeff Guo }
3659fb73e096SJeff Guo 
3660f2c5125aSPablo de Lara static void
3661f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3662f2c5125aSPablo de Lara {
3663d44f8a48SQi Zhang 	uint16_t qid;
36645e91aeefSWei Zhao 	uint64_t offloads;
3665f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3666f2c5125aSPablo de Lara 
3667d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
36685e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3669d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3670f4d178c1SXueming Li 
3671f4d178c1SXueming Li 		if (rxq_share > 0 &&
3672f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3673f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
3674f4d178c1SXueming Li 			port->rx_conf[qid].share_group = pid / rxq_share + 1;
3675f4d178c1SXueming Li 			port->rx_conf[qid].share_qid = qid; /* Equal mapping.
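			 * Queue n of every port in the share group maps to
			 * shared queue n.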
static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rx_conf[qid].share_group = pid / rxq_share + 1;
			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
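/*
 * Worked example of the shared-Rx-queue mapping in rxtx_port_config()
 * above (a sketch, assuming rxq_share == 2):
 *
 *	port 0: share_group = 0 / 2 + 1 = 1
 *	port 1: share_group = 1 / 2 + 1 = 1
 *	port 2: share_group = 2 / 2 + 1 = 2
 *	port 3: share_group = 3 / 2 + 1 = 2
 *
 * Every rxq_share consecutive ports land in the same non-zero group,
 * and share_qid = qid maps queue N of each member port onto the same
 * shared queue N.
 */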
/*
 * Helper function to set MTU from frame size
 *
 * port->dev_info should be set before calling this function.
 *
 * return 0 on success, negative on error
 */
int
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint16_t mtu, new_mtu;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		printf("Failed to get MTU for port %u\n", portid);
		return -1;
	}

	new_mtu = max_rx_pktlen - eth_overhead;

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}
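/*
 * Worked example (assuming a device whose get_eth_overhead() reports
 * 18 bytes, i.e. a 14-byte Ethernet header plus a 4-byte CRC): a
 * requested max_rx_pktlen of 1518 yields
 *
 *	new_mtu = 1518 - 18 = 1500
 *
 * the classic Ethernet MTU. A device that also accounts for VLAN tags
 * reports a larger overhead and thus ends up with a smaller MTU for
 * the same frame size.
 */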
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rx_conf[i].offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave",
			slave_pid);
		return 0;
	}
	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;

	return 0;
}
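/*
 * Worked example for the VMDQ+DCB branch above (a sketch): with
 * num_tcs == RTE_ETH_4_TCS, nb_queue_pools is RTE_ETH_32_POOLS, so the
 * 32 entries of vlan_tags[] map one-to-one onto pools:
 *
 *	pool_map[0]  = { .vlan_id = 0,  .pools = 1 << 0 }
 *	pool_map[1]  = { .vlan_id = 1,  .pools = 1 << 1 }
 *	...
 *	pool_map[31] = { .vlan_id = 31, .pools = 1 << 31 }
 *
 * and the 8 user priorities are spread round-robin over the 4 traffic
 * classes: dcb_tc[] = { 0, 1, 2, 3, 0, 1, 2, 3 }.
 */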
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support DCB.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	/* remove RSS HASH offload for DCB in VT mode */
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
		for (i = 0; i < nb_rxq; i++)
			rte_port->rx_conf[i].offloads &=
				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
	}

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
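/*
 * Usage sketch (a hypothetical call; the interactive "port config ...
 * dcb" command path drives this function in a similar way): the port
 * must be stopped before reconfiguring and restarted afterwards, e.g.
 *
 *	stop_port(pid);
 *	if (init_port_dcb_config(pid, DCB_VT_ENABLED,
 *				 RTE_ETH_4_TCS, 1) == 0)
 *		start_port(pid);
 *
 * for 4 traffic classes with priority flow control enabled.
 */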
static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}
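/*
 * The two arrays in print_stats() above are raw ANSI escape sequences
 * (27 is ESC); as string literals they would read:
 *
 *	clr      = "\033[2J"	clear the whole screen
 *	top_left = "\033[1;1H"	move the cursor to row 1, column 1
 *
 * which is what gives the periodic statistics display its refreshing,
 * full-screen behaviour on VT100-compatible terminals.
 */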
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);
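	/*
	 * Example (a sketch): for an invocation such as
	 *
	 *	dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
	 *
	 * rte_eal_init() parses the EAL options before the "--" separator
	 * and returns how many arguments it consumed, so after the
	 * argc/argv adjustment above launch_args_parse() sees only the
	 * application options (-i, --rxq=2, --txq=2).
	 */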

#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

#ifdef RTE_LIB_METRICS
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}