/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
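/* Pinned external data buffers (--mp-alloc=xbuf) are carved out of
 * memzones of this size; see setup_extbuf() below.
 */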
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * otherwise be terminated. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 */
219d9a191a0SPhil Yang */ 220d9a191a0SPhil Yang uint8_t f_quit; 221d9a191a0SPhil Yang 222af75078fSIntel /* 223*1bb4a528SFerruh Yigit * Max Rx frame size, set by '--max-pkt-len' parameter. 224*1bb4a528SFerruh Yigit */ 225*1bb4a528SFerruh Yigit uint32_t max_rx_pkt_len; 226*1bb4a528SFerruh Yigit 227*1bb4a528SFerruh Yigit /* 2280f2096d7SViacheslav Ovsiienko * Configuration of packet segments used to scatter received packets 2290f2096d7SViacheslav Ovsiienko * if some of split features is configured. 2300f2096d7SViacheslav Ovsiienko */ 2310f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT]; 2320f2096d7SViacheslav Ovsiienko uint8_t rx_pkt_nb_segs; /**< Number of segments to split */ 23391c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT]; 23491c78e09SViacheslav Ovsiienko uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */ 2350f2096d7SViacheslav Ovsiienko 2360f2096d7SViacheslav Ovsiienko /* 237af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 238af75078fSIntel */ 239af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 240af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 241af75078fSIntel TXONLY_DEF_PACKET_LEN, 242af75078fSIntel }; 243af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 244af75078fSIntel 24579bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 24679bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 24779bec05bSKonstantin Ananyev 24882010ef5SYongseok Koh uint8_t txonly_multi_flow; 24982010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */ 25082010ef5SYongseok Koh 2514940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter; 2524940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */ 2534940344dSViacheslav Ovsiienko 2544940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra; 2554940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */ 2564940344dSViacheslav Ovsiienko 257af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 2586c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */ 259861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */ 260e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 261af75078fSIntel 262900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 263900550deSIntel uint8_t dcb_config = 0; 264900550deSIntel 265af75078fSIntel /* 266af75078fSIntel * Configurable number of RX/TX queues. 267af75078fSIntel */ 2681c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */ 269af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 270af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 271af75078fSIntel 272af75078fSIntel /* 273af75078fSIntel * Configurable number of RX/TX ring descriptors. 2748599ed31SRemy Horton * Defaults are supplied by drivers via ethdev. 275af75078fSIntel */ 2768599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0 2778599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0 278af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. 
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
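/*
 * For illustration: rss_hf is a bitmask of ETH_RSS_* hash field flags, so
 * e.g. (ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP) would request hashing on
 * both L3 and L4 headers; at runtime the mask can be changed with the
 * "port config all rss ..." command.
 */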
/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;
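/*
 * Illustrative split (assuming testpmd's even queue distribution across
 * processes): with num_procs = 2 and --rxq=4, the process with proc_id 0
 * would poll queues 0-1 and the one with proc_id 1 queues 2-3, i.e. each
 * process handles nb_q / num_procs consecutive queues starting at
 * proc_id * (nb_q / num_procs).
 */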
/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static void
flow_pick_transfer_proxy_mp(uint16_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int ret;

	port->flow_transfer_proxy = port_id;

	if (!is_proc_primary())
		return;

	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
					   NULL);
	if (ret != 0) {
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}
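/*
 * Like eth_dev_configure_mp() above, the remaining *_mp() helpers only
 * perform the real ethdev/mempool operation in the primary process;
 * secondary processes treat the call as a successful no-op, since the
 * primary owns device configuration in multi-process mode.
 */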
static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is new (not yet discovered).
 * If the socket id is new, return a positive value; if it was already
 * discovered, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
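/*
 * Illustration of the estimate above (hypothetical numbers): with 2 MB pages
 * and a 2048-byte mbuf data size, rte_mempool_calc_obj_size() may report an
 * obj_sz of roughly 2.3 KB, so about 890 objects fit per page; 180000 mbufs
 * would then need ~203 pages (~406 MB) of object memory, plus the fixed
 * 128 MB hdr_mem allowance, with the sum rounded up to a page boundary.
 */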
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
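/*
 * create_extmem() below walks the pgsizes[] candidates in order: for each
 * page size it estimates the required mapping size with calc_mem_size(),
 * attempts the anonymous mapping with alloc_mem(), and keeps the first page
 * size that succeeds, recording the IOVA of every page so the area can be
 * registered with EAL later on.
 */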
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
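/*
 * dma_map_cb() below is used as an rte_mempool_mem_iter() callback for
 * mempools populated from anonymous memory (--mp-alloc=anon): each memory
 * chunk is registered with EAL as external memory and DMA-mapped for every
 * probed port (see the MP_ALLOC_ANON case in mbuf_pool_create() below);
 * dma_unmap_cb() is its teardown counterpart.
 */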
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
#endif
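/*
 * Worked example for setup_extbuf() below (illustrative, assuming the
 * default 2176-byte mbuf data size and a 64-byte cache line):
 * elt_size = RTE_ALIGN_CEIL(2176, 64) = 2176, so
 * elt_num = 2 MB / 2176 = 963 buffers per zone, and 180000 mbufs require
 * zone_num = ceil(180000 / 963) = 187 memzones.
 */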
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
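/*
 * Note for multi-process runs: only the primary process actually creates
 * mbuf pools; a secondary process looks the pool up by name in
 * mbuf_pool_create() below, so the name built by mbuf_poolname_build()
 * must be identical in every process.
 */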
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				"Get mbuf pool for socket %u failed: %s\n",
				socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
	{
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
		break;
	}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
	{
		int heap_socket;
		bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

		if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
			rte_exit(EXIT_FAILURE, "Could not create external memory\n");

		heap_socket = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
Burakov rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); 1169c7f5dba7SAnatoly Burakov if (heap_socket < 0) 1170c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); 1171c7f5dba7SAnatoly Burakov 11720e798567SPavan Nikhilesh TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 11730e798567SPavan Nikhilesh rte_mbuf_best_mempool_ops()); 1174ea0c20eaSOlivier Matz rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 1175c7f5dba7SAnatoly Burakov mb_mempool_cache, 0, mbuf_seg_size, 1176c7f5dba7SAnatoly Burakov heap_socket); 1177c7f5dba7SAnatoly Burakov break; 1178c7f5dba7SAnatoly Burakov } 1179761f7ae1SJie Zhou #endif 118072512e18SViacheslav Ovsiienko case MP_ALLOC_XBUF: 118172512e18SViacheslav Ovsiienko { 118272512e18SViacheslav Ovsiienko struct rte_pktmbuf_extmem *ext_mem; 118372512e18SViacheslav Ovsiienko unsigned int ext_num; 118472512e18SViacheslav Ovsiienko 118572512e18SViacheslav Ovsiienko ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, 118672512e18SViacheslav Ovsiienko socket_id, pool_name, &ext_mem); 118772512e18SViacheslav Ovsiienko if (ext_num == 0) 118872512e18SViacheslav Ovsiienko rte_exit(EXIT_FAILURE, 118972512e18SViacheslav Ovsiienko "Can't create pinned data buffers\n"); 119072512e18SViacheslav Ovsiienko 119172512e18SViacheslav Ovsiienko TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", 119272512e18SViacheslav Ovsiienko rte_mbuf_best_mempool_ops()); 119372512e18SViacheslav Ovsiienko rte_mp = rte_pktmbuf_pool_create_extbuf 119472512e18SViacheslav Ovsiienko (pool_name, nb_mbuf, mb_mempool_cache, 119572512e18SViacheslav Ovsiienko 0, mbuf_seg_size, socket_id, 119672512e18SViacheslav Ovsiienko ext_mem, ext_num); 119772512e18SViacheslav Ovsiienko free(ext_mem); 119872512e18SViacheslav Ovsiienko break; 119972512e18SViacheslav Ovsiienko } 1200c7f5dba7SAnatoly Burakov default: 1201c7f5dba7SAnatoly Burakov { 1202c7f5dba7SAnatoly Burakov rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); 1203c7f5dba7SAnatoly Burakov } 1204bece7b6cSChristian Ehrhardt } 1205148f963fSBruce Richardson 1206761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 120724427bb9SOlivier Matz err: 1208761f7ae1SJie Zhou #endif 1209af75078fSIntel if (rte_mp == NULL) { 1210d1eb542eSOlivier Matz rte_exit(EXIT_FAILURE, 1211d1eb542eSOlivier Matz "Creation of mbuf pool for socket %u failed: %s\n", 1212d1eb542eSOlivier Matz socket_id, rte_strerror(rte_errno)); 1213148f963fSBruce Richardson } else if (verbose_level > 0) { 1214591a9d79SStephen Hemminger rte_mempool_dump(stdout, rte_mp); 1215af75078fSIntel } 1216401b744dSShahaf Shuler return rte_mp; 1217af75078fSIntel } 1218af75078fSIntel 121920a0286fSLiu Xiaofeng /* 122020a0286fSLiu Xiaofeng * Check given socket id is valid or not with NUMA mode, 122120a0286fSLiu Xiaofeng * if valid, return 0, else return -1 122220a0286fSLiu Xiaofeng */ 122320a0286fSLiu Xiaofeng static int 122420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id) 122520a0286fSLiu Xiaofeng { 122620a0286fSLiu Xiaofeng static int warning_once = 0; 122720a0286fSLiu Xiaofeng 1228c9cafcc8SShahaf Shuler if (new_socket_id(socket_id)) { 122920a0286fSLiu Xiaofeng if (!warning_once && numa_support) 123061a3b0e5SAndrew Rybchenko fprintf(stderr, 123161a3b0e5SAndrew Rybchenko "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n"); 123220a0286fSLiu Xiaofeng warning_once = 1; 123320a0286fSLiu Xiaofeng return -1; 123420a0286fSLiu Xiaofeng } 123520a0286fSLiu Xiaofeng return 0; 123620a0286fSLiu 
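/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): the
 * primary/secondary split used by mbuf_pool_create() above, reduced to its
 * core. A secondary process must not create the pool; it can only look up
 * by name what the primary created. The helper name and cache size are
 * made up for the example.
 */
#if 0
static struct rte_mempool *
example_get_pool(const char *name, unsigned int n, uint16_t seg_size,
		 unsigned int socket)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_mempool_lookup(name); /* NULL if not created yet */
	return rte_pktmbuf_pool_create(name, n, 256 /* cache */,
				       0 /* priv */, seg_size, socket);
}
#endif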

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			fprintf(stderr,
				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the id of the port whose max_rx_queues is the smallest
 * among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the requested number of Rx queues is valid.
 * It is valid if it does not exceed the maximum number of Rx queues
 * supported by every port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		fprintf(stderr,
			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
			rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the id of the port whose max_tx_queues is the smallest
 * among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}
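/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): how the
 * check_nb_rxq()/check_nb_txq() helpers are meant to be used before
 * rte_eth_dev_configure(). The helper name and parameters are made up.
 */
#if 0
static int
example_validate_and_configure(uint16_t port_id, queueid_t nb_rx,
			       queueid_t nb_tx,
			       const struct rte_eth_conf *conf)
{
	/* Counts that pass these checks fit every port, including port_id. */
	if (check_nb_rxq(nb_rx) != 0 || check_nb_txq(nb_tx) != 0)
		return -1;
	return rte_eth_dev_configure(port_id, nb_rx, nb_tx, conf);
}
#endif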
/*
 * Check whether the requested number of Tx queues is valid.
 * It is valid if it does not exceed the maximum number of Tx queues
 * supported by every port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		fprintf(stderr,
			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
			txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RXDs of every rx queue.
 * *pid returns the id of the port whose Rx descriptor limit is the
 * smallest among all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every rx queue.
 * *pid returns the id of the port whose minimal Rx descriptor
 * requirement is the largest among all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}
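/*
 * Related ethdev helper, noted for comparison: when only a single port is
 * involved, rte_eth_dev_adjust_nb_rx_tx_desc() clamps ring sizes to that
 * port's descriptor limits in place, instead of scanning all ports the way
 * the helpers here do (testpmd scans because one global setting must fit
 * every port). A minimal sketch, disabled with #if 0:
 */
#if 0
static int
example_clamp_ring_sizes(uint16_t port_id, uint16_t *rxd, uint16_t *txd)
{
	/* Adjusts *rxd and *txd to the port's nb_min/nb_max/nb_align. */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, rxd, txd);
}
#endif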
/*
 * Check whether the requested number of RXDs per queue is valid.
 * It must not exceed the maximum and not fall below the minimal
 * number of RXDs supported by every Rx queue of every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
			rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
			rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}

/*
 * Get the allowed maximum number of TXDs of every tx queue.
 * *pid returns the id of the port whose Tx descriptor limit is the
 * smallest among all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs of every tx queue.
 * *pid returns the id of the port whose minimal Tx descriptor
 * requirement is the largest among all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}
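/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): the
 * rte_eth_desc_lim fields these helpers reduce over. Note that nb_align is
 * not validated by check_nb_rxd()/check_nb_txd(); alignment is expected to
 * be enforced by the driver at queue setup time.
 */
#if 0
static int
example_desc_count_ok(const struct rte_eth_desc_lim *lim, uint16_t nb_desc)
{
	return nb_desc >= lim->nb_min && nb_desc <= lim->nb_max &&
	       (lim->nb_align == 0 || (nb_desc % lim->nb_align) == 0);
}
#endif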
/*
 * Check whether the requested number of TXDs per queue is valid.
 * It must not exceed the maximum and not fall below the minimal
 * number of TXDs supported by every Tx queue of every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
			txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
			txd, allowed_min_txd, pid);
		return -1;
	}
	return 0;
}


/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the id of the port whose hairpin max_nb_queues capability
 * is the smallest among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}
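/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): querying
 * one port's hairpin capability directly. A non-zero return from
 * rte_eth_dev_hairpin_capability_get() means the driver has no hairpin
 * support, which the scan above maps to "0 queues allowed".
 */
#if 0
static uint16_t
example_port_hairpin_max(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;

	if (rte_eth_dev_hairpin_capability_get(port_id, &cap) != 0)
		return 0; /* not supported by this driver */
	return cap.max_nb_queues;
}
#endif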
/*
 * Check whether the requested number of hairpin queues is valid.
 * It is valid if it does not exceed the maximum number of hairpin
 * queues supported by every port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		fprintf(stderr,
			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
			hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}

static int
get_eth_overhead(struct rte_eth_dev_info *dev_info)
{
	uint32_t eth_overhead;

	if (dev_info->max_mtu != UINT16_MAX &&
	    dev_info->max_rx_pktlen > dev_info->max_mtu)
		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
	else
		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return eth_overhead;
}
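/*
 * Worked example for get_eth_overhead(): a driver reporting
 * max_rx_pktlen = 1518 and max_mtu = 1500 yields 18 bytes of overhead,
 * which equals the RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback
 * used when max_mtu is not reported. A VLAN-aware driver might report
 * 1522/1500 instead, giving 22 bytes.
 */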
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);
	flow_pick_transfer_proxy_mp(pid);

	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	ret = update_jumbo_frame_offload(pid, 0);
	if (ret != 0)
		fprintf(stderr,
			"Updating jumbo frame offload failed for port %u\n",
			pid);

	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	if (max_rx_pkt_len)
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		uint16_t mtu;

		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					"Configured mbuf size of the first segment %hu\n",
					mbuf_data_size[0]);
			}
		}
	}
}
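/*
 * Worked example for the nb_mtu_seg_max adjustment above (made-up numbers):
 * with mtu = 9000, 18 bytes of overhead and nb_mtu_seg_max = 5, the integer
 * division gives data_size = (9000 + 18) / 5 = 1803, so the first-segment
 * mbuf data size is raised to 1803 + RTE_PKTMBUF_HEADROOM (128 by default)
 * = 1931 if it was configured smaller.
 */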
static void
init_config(void)
{
	portid_t pid;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		uint32_t socket_id;

		if (numa_support) {
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			}
		} else {
			socket_id = (socket_num == UMA_NO_CONFIG) ?
				    0 : socket_num;
		}
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							 nb_mbuf_per_pool,
							 socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
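/*
 * Sizing note for init_config() above: without --total-num-mbufs, each pool
 * gets (RTE_TEST_RX_DESC_MAX + nb_lcores * mb_mempool_cache +
 * RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST) * RTE_MAX_ETHPORTS mbufs, i.e.
 * enough for every possible port to fill its largest Rx and Tx rings plus
 * the per-lcore caches and one burst in flight.
 */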

void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			fprintf(stderr,
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			fprintf(stderr,
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		fprintf(stderr,
			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
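/*
 * Worked example for init_fwd_streams(): streams are allocated per
 * (port, queue) pair using the larger of the two queue counts, so 2 ports
 * with nb_rxq = 4 and nb_txq = 2 yield
 * nb_fwd_streams = 2 * RTE_MAX(4, 2) = 8 streams.
 */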
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
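/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): how a
 * forwarding engine feeds the histogram summarized by
 * pkt_burst_stats_display() above: one counter per observed burst size,
 * including empty (size 0) bursts. Condensed from the pattern the engines
 * use when record_burst_stats is set; the helper name is made up.
 */
#if 0
static inline void
example_record_burst(struct pkt_burst_stats *pbs, uint16_t nb_pkts)
{
	pbs->pkt_burst_spread[nb_pkts]++;
}
#endif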
%-14"PRIu64"\n", 1951d139cf23SLance Richardson fs->rx_bad_outer_ip_csum); 195294d65546SDavid Marchand } else { 195394d65546SDavid Marchand printf("\n"); 1954af75078fSIntel } 1955af75078fSIntel 19560e4b1963SDharmik Thakkar if (record_burst_stats) { 1957af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1958af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 19590e4b1963SDharmik Thakkar } 1960af75078fSIntel } 1961af75078fSIntel 196253324971SDavid Marchand void 196353324971SDavid Marchand fwd_stats_display(void) 196453324971SDavid Marchand { 196553324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 196653324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 196753324971SDavid Marchand struct { 196853324971SDavid Marchand struct fwd_stream *rx_stream; 196953324971SDavid Marchand struct fwd_stream *tx_stream; 197053324971SDavid Marchand uint64_t tx_dropped; 197153324971SDavid Marchand uint64_t rx_bad_ip_csum; 197253324971SDavid Marchand uint64_t rx_bad_l4_csum; 197353324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 1974d139cf23SLance Richardson uint64_t rx_bad_outer_ip_csum; 197553324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 197653324971SDavid Marchand uint64_t total_rx_dropped = 0; 197753324971SDavid Marchand uint64_t total_tx_dropped = 0; 197853324971SDavid Marchand uint64_t total_rx_nombuf = 0; 197953324971SDavid Marchand struct rte_eth_stats stats; 198053324971SDavid Marchand uint64_t fwd_cycles = 0; 198153324971SDavid Marchand uint64_t total_recv = 0; 198253324971SDavid Marchand uint64_t total_xmit = 0; 198353324971SDavid Marchand struct rte_port *port; 198453324971SDavid Marchand streamid_t sm_id; 198553324971SDavid Marchand portid_t pt_id; 198653324971SDavid Marchand int i; 198753324971SDavid Marchand 198853324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 198953324971SDavid Marchand 199053324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 199153324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 199253324971SDavid Marchand 199353324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 199453324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 199553324971SDavid Marchand fwd_stream_stats_display(sm_id); 199653324971SDavid Marchand } else { 199753324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 199853324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 199953324971SDavid Marchand } 200053324971SDavid Marchand 200153324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 200253324971SDavid Marchand 200353324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 200453324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 200553324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 200653324971SDavid Marchand fs->rx_bad_outer_l4_csum; 2007d139cf23SLance Richardson ports_stats[fs->rx_port].rx_bad_outer_ip_csum += 2008d139cf23SLance Richardson fs->rx_bad_outer_ip_csum; 200953324971SDavid Marchand 2010bc700b67SDharmik Thakkar if (record_core_cycles) 201153324971SDavid Marchand fwd_cycles += fs->core_cycles; 201253324971SDavid Marchand } 201353324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 201453324971SDavid Marchand pt_id = fwd_ports_ids[i]; 201553324971SDavid Marchand port = &ports[pt_id]; 201653324971SDavid Marchand 201753324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 
201853324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 201953324971SDavid Marchand stats.opackets -= port->stats.opackets; 202053324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 202153324971SDavid Marchand stats.obytes -= port->stats.obytes; 202253324971SDavid Marchand stats.imissed -= port->stats.imissed; 202353324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 202453324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 202553324971SDavid Marchand 202653324971SDavid Marchand total_recv += stats.ipackets; 202753324971SDavid Marchand total_xmit += stats.opackets; 202853324971SDavid Marchand total_rx_dropped += stats.imissed; 202953324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 203053324971SDavid Marchand total_tx_dropped += stats.oerrors; 203153324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 203253324971SDavid Marchand 203353324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 203453324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 203553324971SDavid Marchand 203608dcd187SHuisong Li printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 203708dcd187SHuisong Li "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed, 203853324971SDavid Marchand stats.ipackets + stats.imissed); 203953324971SDavid Marchand 2040d139cf23SLance Richardson if (cur_fwd_eng == &csum_fwd_engine) { 204153324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 204253324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 204353324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 204453324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 204553324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 204653324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 2047d139cf23SLance Richardson printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", 2048d139cf23SLance Richardson ports_stats[pt_id].rx_bad_outer_ip_csum); 2049d139cf23SLance Richardson } 205053324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 205108dcd187SHuisong Li printf(" RX-error: %-"PRIu64"\n", stats.ierrors); 205208dcd187SHuisong Li printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); 205353324971SDavid Marchand } 205453324971SDavid Marchand 205508dcd187SHuisong Li printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 205653324971SDavid Marchand "TX-total: %-"PRIu64"\n", 205753324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 205853324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 205953324971SDavid Marchand 20600e4b1963SDharmik Thakkar if (record_burst_stats) { 206153324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 206253324971SDavid Marchand pkt_burst_stats_display("RX", 206353324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 206453324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 206553324971SDavid Marchand pkt_burst_stats_display("TX", 206653324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 20670e4b1963SDharmik Thakkar } 206853324971SDavid Marchand 206953324971SDavid Marchand printf(" %s--------------------------------%s\n", 207053324971SDavid Marchand fwd_stats_border, fwd_stats_border); 207153324971SDavid Marchand } 207253324971SDavid Marchand 207353324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 207453324971SDavid Marchand "%s\n", 207553324971SDavid Marchand acc_stats_border, acc_stats_border); 207653324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: 
" 207753324971SDavid Marchand "%-"PRIu64"\n" 207853324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 207953324971SDavid Marchand "%-"PRIu64"\n", 208053324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 208153324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 208253324971SDavid Marchand if (total_rx_nombuf > 0) 208353324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 208453324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 208553324971SDavid Marchand "%s\n", 208653324971SDavid Marchand acc_stats_border, acc_stats_border); 2087bc700b67SDharmik Thakkar if (record_core_cycles) { 20884c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 20893a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 20903a164e00SPhil Yang uint64_t total_pkts = 0; 20913a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 20923a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 20933a164e00SPhil Yang total_pkts = total_xmit; 20943a164e00SPhil Yang else 20953a164e00SPhil Yang total_pkts = total_recv; 20963a164e00SPhil Yang 20971920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 20983a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 20994c0497b1SDharmik Thakkar " MHz Clock\n", 21003a164e00SPhil Yang (double) fwd_cycles / total_pkts, 21013a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 21024c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 21033a164e00SPhil Yang } 2104bc700b67SDharmik Thakkar } 210553324971SDavid Marchand } 210653324971SDavid Marchand 210753324971SDavid Marchand void 210853324971SDavid Marchand fwd_stats_reset(void) 210953324971SDavid Marchand { 211053324971SDavid Marchand streamid_t sm_id; 211153324971SDavid Marchand portid_t pt_id; 211253324971SDavid Marchand int i; 211353324971SDavid Marchand 211453324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 211553324971SDavid Marchand pt_id = fwd_ports_ids[i]; 211653324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 211753324971SDavid Marchand } 211853324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 211953324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 212053324971SDavid Marchand 212153324971SDavid Marchand fs->rx_packets = 0; 212253324971SDavid Marchand fs->tx_packets = 0; 212353324971SDavid Marchand fs->fwd_dropped = 0; 212453324971SDavid Marchand fs->rx_bad_ip_csum = 0; 212553324971SDavid Marchand fs->rx_bad_l4_csum = 0; 212653324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 2127d139cf23SLance Richardson fs->rx_bad_outer_ip_csum = 0; 212853324971SDavid Marchand 212953324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 213053324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 213153324971SDavid Marchand fs->core_cycles = 0; 213253324971SDavid Marchand } 213353324971SDavid Marchand } 213453324971SDavid Marchand 2135af75078fSIntel static void 21367741e4cfSIntel flush_fwd_rx_queues(void) 2137af75078fSIntel { 2138af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2139af75078fSIntel portid_t rxp; 21407741e4cfSIntel portid_t port_id; 2141af75078fSIntel queueid_t rxq; 2142af75078fSIntel uint16_t nb_rx; 2143af75078fSIntel uint16_t i; 2144af75078fSIntel uint8_t j; 2145f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, 
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	if (num_procs > 1) {
		printf("multi-process does not support flushing the fwd Rx queues, skipping\n");
		return;
	}

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to exit
				 * this loop after 1sec timer expiry.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	unsigned int i;
	unsigned int lc_id;
	int diag;

	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				fprintf(stderr,
					"launch lcore %u failed - diag=%d\n",
					lc_id, diag);
		}
	}
}
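/*
 * Illustrative sketch (not part of testpmd, disabled with #if 0): the bare
 * EAL launch/join pattern that launch_packet_forwarding() above builds on.
 * The worker function is made up for the example.
 */
#if 0
static int
example_worker(void *arg)
{
	(void)arg;
	return 0;
}

static void
example_launch_all(void)
{
	unsigned int lc_id;

	RTE_LCORE_FOREACH_WORKER(lc_id)
		rte_eal_remote_launch(example_worker, NULL, lc_id);
	rte_eal_mp_wait_lcore(); /* join all workers */
}
#endif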
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	unsigned int i;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		fprintf(stderr, "Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		fprintf(stderr, "Packet forwarding already started\n");
		return;
	}

	fwd_config_setup();

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			if (port_fwd_begin(fwd_ports_ids[i])) {
				fprintf(stderr,
					"Packet forwarding is not ready\n");
				return;
			}
		}
	}

	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
				if (port_fwd_begin(fwd_ports_ids[i])) {
					fprintf(stderr,
						"Packet forwarding is not ready\n");
					return;
				}
			}
		}
	}

	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	if (with_tx_first) {
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		fprintf(stderr, "Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		fprintf(stderr, "\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		fprintf(stderr, "\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}
Mukawa } 2444edab33b1STetsuya Mukawa 2445edab33b1STetsuya Mukawa return 1; 2446edab33b1STetsuya Mukawa } 2447edab33b1STetsuya Mukawa 2448edab33b1STetsuya Mukawa int 2449edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2450edab33b1STetsuya Mukawa { 2451edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2452edab33b1STetsuya Mukawa return 0; 2453edab33b1STetsuya Mukawa 2454edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2455edab33b1STetsuya Mukawa return 0; 2456edab33b1STetsuya Mukawa 2457edab33b1STetsuya Mukawa return 1; 2458edab33b1STetsuya Mukawa } 2459edab33b1STetsuya Mukawa 24601c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */ 24611c69df45SOri Kam static int 246201817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) 24631c69df45SOri Kam { 24641c69df45SOri Kam queueid_t qi; 24651c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = { 24661c69df45SOri Kam .peer_count = 1, 24671c69df45SOri Kam }; 24681c69df45SOri Kam int i; 24691c69df45SOri Kam int diag; 24701c69df45SOri Kam struct rte_port *port = &ports[pi]; 247101817b10SBing Zhao uint16_t peer_rx_port = pi; 247201817b10SBing Zhao uint16_t peer_tx_port = pi; 247301817b10SBing Zhao uint32_t manual = 1; 247401817b10SBing Zhao uint32_t tx_exp = hairpin_mode & 0x10; 247501817b10SBing Zhao 247601817b10SBing Zhao if (!(hairpin_mode & 0xf)) { 247701817b10SBing Zhao peer_rx_port = pi; 247801817b10SBing Zhao peer_tx_port = pi; 247901817b10SBing Zhao manual = 0; 248001817b10SBing Zhao } else if (hairpin_mode & 0x1) { 248101817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(pi + 1, 248201817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 248301817b10SBing Zhao if (peer_tx_port >= RTE_MAX_ETHPORTS) 248401817b10SBing Zhao peer_tx_port = rte_eth_find_next_owned_by(0, 248501817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 248601817b10SBing Zhao if (p_pi != RTE_MAX_ETHPORTS) { 248701817b10SBing Zhao peer_rx_port = p_pi; 248801817b10SBing Zhao } else { 248901817b10SBing Zhao uint16_t next_pi; 249001817b10SBing Zhao 249101817b10SBing Zhao /* Last port will be the peer RX port of the first. 
*/ 249201817b10SBing Zhao RTE_ETH_FOREACH_DEV(next_pi) 249301817b10SBing Zhao peer_rx_port = next_pi; 249401817b10SBing Zhao } 249501817b10SBing Zhao manual = 1; 249601817b10SBing Zhao } else if (hairpin_mode & 0x2) { 249701817b10SBing Zhao if (cnt_pi & 0x1) { 249801817b10SBing Zhao peer_rx_port = p_pi; 249901817b10SBing Zhao } else { 250001817b10SBing Zhao peer_rx_port = rte_eth_find_next_owned_by(pi + 1, 250101817b10SBing Zhao RTE_ETH_DEV_NO_OWNER); 250201817b10SBing Zhao if (peer_rx_port >= RTE_MAX_ETHPORTS) 250301817b10SBing Zhao peer_rx_port = pi; 250401817b10SBing Zhao } 250501817b10SBing Zhao peer_tx_port = peer_rx_port; 250601817b10SBing Zhao manual = 1; 250701817b10SBing Zhao } 25081c69df45SOri Kam 25091c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { 251001817b10SBing Zhao hairpin_conf.peers[0].port = peer_rx_port; 25111c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq; 251201817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 251301817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 25141c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup 25151c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf); 25161c69df45SOri Kam i++; 25171c69df45SOri Kam if (diag == 0) 25181c69df45SOri Kam continue; 25191c69df45SOri Kam 25201c69df45SOri Kam /* Fail to setup Tx hairpin queue, return */ 25211c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status), 25221c69df45SOri Kam RTE_PORT_HANDLING, 25231c69df45SOri Kam RTE_PORT_STOPPED) == 0) 252461a3b0e5SAndrew Rybchenko fprintf(stderr, 252561a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 252661a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 252761a3b0e5SAndrew Rybchenko pi); 25281c69df45SOri Kam /* try to reconfigure queues next time */ 25291c69df45SOri Kam port->need_reconfig_queues = 1; 25301c69df45SOri Kam return -1; 25311c69df45SOri Kam } 25321c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { 253301817b10SBing Zhao hairpin_conf.peers[0].port = peer_tx_port; 25341c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq; 253501817b10SBing Zhao hairpin_conf.manual_bind = !!manual; 253601817b10SBing Zhao hairpin_conf.tx_explicit = !!tx_exp; 25371c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup 25381c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf); 25391c69df45SOri Kam i++; 25401c69df45SOri Kam if (diag == 0) 25411c69df45SOri Kam continue; 25421c69df45SOri Kam 25431c69df45SOri Kam /* Fail to setup Rx hairpin queue, return */ 25441c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status), 25451c69df45SOri Kam RTE_PORT_HANDLING, 25461c69df45SOri Kam RTE_PORT_STOPPED) == 0) 254761a3b0e5SAndrew Rybchenko fprintf(stderr, 254861a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", pi); 254961a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure port %d hairpin queues\n", 255061a3b0e5SAndrew Rybchenko pi); 25511c69df45SOri Kam /* try to reconfigure queues next time */ 25521c69df45SOri Kam port->need_reconfig_queues = 1; 25531c69df45SOri Kam return -1; 25541c69df45SOri Kam } 25551c69df45SOri Kam return 0; 25561c69df45SOri Kam } 25571c69df45SOri Kam
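/*
 * Illustrative summary (assumption: the bits below match the testpmd
 * "--hairpin-mode" option) of the hairpin_mode values consumed by
 * setup_hairpin_queues() above:
 *
 *     0x0  - each port is its own hairpin peer, implicit bind
 *     0x1  - chain the ports into a ring (manual bind)
 *     0x2  - pair each odd port with the previous even one (manual bind)
 *     0x10 - Tx flow rules are inserted explicitly by the application
 *
 * For example, a hypothetical two-port run with paired ports and
 * explicit Tx rules could be launched as:
 *
 *     dpdk-testpmd -a <pci0> -a <pci1> -- --rxq=2 --txq=2 \
 *         --hairpinq=1 --hairpin-mode=0x12
 */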
25582befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */ 25592befc67fSViacheslav Ovsiienko int 25602befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 25612befc67fSViacheslav Ovsiienko uint16_t nb_rx_desc, unsigned int socket_id, 25622befc67fSViacheslav Ovsiienko struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) 25632befc67fSViacheslav Ovsiienko { 25642befc67fSViacheslav Ovsiienko union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; 25652befc67fSViacheslav Ovsiienko unsigned int i, mp_n; 25662befc67fSViacheslav Ovsiienko int ret; 25672befc67fSViacheslav Ovsiienko 25682befc67fSViacheslav Ovsiienko if (rx_pkt_nb_segs <= 1 || 25692befc67fSViacheslav Ovsiienko (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { 25702befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 25712befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 25722befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, 25732befc67fSViacheslav Ovsiienko nb_rx_desc, socket_id, 25742befc67fSViacheslav Ovsiienko rx_conf, mp); 25752befc67fSViacheslav Ovsiienko return ret; 25762befc67fSViacheslav Ovsiienko } 25772befc67fSViacheslav Ovsiienko for (i = 0; i < rx_pkt_nb_segs; i++) { 25782befc67fSViacheslav Ovsiienko struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; 25792befc67fSViacheslav Ovsiienko struct rte_mempool *mpx; 25802befc67fSViacheslav Ovsiienko /* 25812befc67fSViacheslav Ovsiienko * Use the last valid pool for the segments whose index 25822befc67fSViacheslav Ovsiienko * exceeds the number of configured pools. 25832befc67fSViacheslav Ovsiienko */ 25842befc67fSViacheslav Ovsiienko mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; 25852befc67fSViacheslav Ovsiienko mpx = mbuf_pool_find(socket_id, mp_n); 25862befc67fSViacheslav Ovsiienko /* Handle zero as mbuf data buffer size. */ 25872befc67fSViacheslav Ovsiienko rx_seg->length = rx_pkt_seg_lengths[i] ? 25882befc67fSViacheslav Ovsiienko rx_pkt_seg_lengths[i] : 25892befc67fSViacheslav Ovsiienko mbuf_data_size[mp_n]; 25902befc67fSViacheslav Ovsiienko rx_seg->offset = i < rx_pkt_nb_offs ? 25912befc67fSViacheslav Ovsiienko rx_pkt_seg_offsets[i] : 0; 25922befc67fSViacheslav Ovsiienko rx_seg->mp = mpx ?
mpx : mp; 25932befc67fSViacheslav Ovsiienko } 25942befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = rx_pkt_nb_segs; 25952befc67fSViacheslav Ovsiienko rx_conf->rx_seg = rx_useg; 25962befc67fSViacheslav Ovsiienko ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, 25972befc67fSViacheslav Ovsiienko socket_id, rx_conf, NULL); 25982befc67fSViacheslav Ovsiienko rx_conf->rx_seg = NULL; 25992befc67fSViacheslav Ovsiienko rx_conf->rx_nseg = 0; 26002befc67fSViacheslav Ovsiienko return ret; 26012befc67fSViacheslav Ovsiienko } 26022befc67fSViacheslav Ovsiienko 260363b72657SIvan Ilchenko static int 260463b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi) 260563b72657SIvan Ilchenko { 260663b72657SIvan Ilchenko uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; 260763b72657SIvan Ilchenko uint64_t **prev_values = &ports[pi].xstats_info.prev_values; 260863b72657SIvan Ilchenko uint64_t **curr_values = &ports[pi].xstats_info.curr_values; 260963b72657SIvan Ilchenko 261063b72657SIvan Ilchenko if (xstats_display_num == 0) 261163b72657SIvan Ilchenko return 0; 261263b72657SIvan Ilchenko 261363b72657SIvan Ilchenko *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); 261463b72657SIvan Ilchenko if (*ids_supp == NULL) 261563b72657SIvan Ilchenko goto fail_ids_supp; 261663b72657SIvan Ilchenko 261763b72657SIvan Ilchenko *prev_values = calloc(xstats_display_num, 261863b72657SIvan Ilchenko sizeof(**prev_values)); 261963b72657SIvan Ilchenko if (*prev_values == NULL) 262063b72657SIvan Ilchenko goto fail_prev_values; 262163b72657SIvan Ilchenko 262263b72657SIvan Ilchenko *curr_values = calloc(xstats_display_num, 262363b72657SIvan Ilchenko sizeof(**curr_values)); 262463b72657SIvan Ilchenko if (*curr_values == NULL) 262563b72657SIvan Ilchenko goto fail_curr_values; 262663b72657SIvan Ilchenko 262763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = true; 262863b72657SIvan Ilchenko 262963b72657SIvan Ilchenko return 0; 263063b72657SIvan Ilchenko 263163b72657SIvan Ilchenko fail_curr_values: 263263b72657SIvan Ilchenko free(*prev_values); 263363b72657SIvan Ilchenko fail_prev_values: 263463b72657SIvan Ilchenko free(*ids_supp); 263563b72657SIvan Ilchenko fail_ids_supp: 263663b72657SIvan Ilchenko return -ENOMEM; 263763b72657SIvan Ilchenko } 263863b72657SIvan Ilchenko 263963b72657SIvan Ilchenko static void 264063b72657SIvan Ilchenko free_xstats_display_info(portid_t pi) 264163b72657SIvan Ilchenko { 264263b72657SIvan Ilchenko if (!ports[pi].xstats_info.allocated) 264363b72657SIvan Ilchenko return; 264463b72657SIvan Ilchenko free(ports[pi].xstats_info.ids_supp); 264563b72657SIvan Ilchenko free(ports[pi].xstats_info.prev_values); 264663b72657SIvan Ilchenko free(ports[pi].xstats_info.curr_values); 264763b72657SIvan Ilchenko ports[pi].xstats_info.allocated = false; 264863b72657SIvan Ilchenko } 264963b72657SIvan Ilchenko 265063b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. 
*/ 265163b72657SIvan Ilchenko static void 265263b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi) 265363b72657SIvan Ilchenko { 265463b72657SIvan Ilchenko unsigned int stat, stat_supp; 265563b72657SIvan Ilchenko const char *xstat_name; 265663b72657SIvan Ilchenko struct rte_port *port; 265763b72657SIvan Ilchenko uint64_t *ids_supp; 265863b72657SIvan Ilchenko int rc; 265963b72657SIvan Ilchenko 266063b72657SIvan Ilchenko if (xstats_display_num == 0) 266163b72657SIvan Ilchenko return; 266263b72657SIvan Ilchenko 266363b72657SIvan Ilchenko if (pi == (portid_t)RTE_PORT_ALL) { 266463b72657SIvan Ilchenko fill_xstats_display_info(); 266563b72657SIvan Ilchenko return; 266663b72657SIvan Ilchenko } 266763b72657SIvan Ilchenko 266863b72657SIvan Ilchenko port = &ports[pi]; 266963b72657SIvan Ilchenko if (port->port_status != RTE_PORT_STARTED) 267063b72657SIvan Ilchenko return; 267163b72657SIvan Ilchenko 267263b72657SIvan Ilchenko if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) 267363b72657SIvan Ilchenko rte_exit(EXIT_FAILURE, 267463b72657SIvan Ilchenko "Failed to allocate xstats display memory\n"); 267563b72657SIvan Ilchenko 267663b72657SIvan Ilchenko ids_supp = port->xstats_info.ids_supp; 267763b72657SIvan Ilchenko for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { 267863b72657SIvan Ilchenko xstat_name = xstats_display[stat].name; 267963b72657SIvan Ilchenko rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, 268063b72657SIvan Ilchenko ids_supp + stat_supp); 268163b72657SIvan Ilchenko if (rc != 0) { 268263b72657SIvan Ilchenko fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", 268363b72657SIvan Ilchenko xstat_name, pi, stat); 268463b72657SIvan Ilchenko continue; 268563b72657SIvan Ilchenko } 268663b72657SIvan Ilchenko stat_supp++; 268763b72657SIvan Ilchenko } 268863b72657SIvan Ilchenko 268963b72657SIvan Ilchenko port->xstats_info.ids_supp_sz = stat_supp; 269063b72657SIvan Ilchenko } 269163b72657SIvan Ilchenko 269263b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. 
*/ 269363b72657SIvan Ilchenko static void 269463b72657SIvan Ilchenko fill_xstats_display_info(void) 269563b72657SIvan Ilchenko { 269663b72657SIvan Ilchenko portid_t pi; 269763b72657SIvan Ilchenko 269863b72657SIvan Ilchenko if (xstats_display_num == 0) 269963b72657SIvan Ilchenko return; 270063b72657SIvan Ilchenko 270163b72657SIvan Ilchenko RTE_ETH_FOREACH_DEV(pi) 270263b72657SIvan Ilchenko fill_xstats_display_info_for_port(pi); 270363b72657SIvan Ilchenko } 270463b72657SIvan Ilchenko 2705edab33b1STetsuya Mukawa int 2706ce8d5614SIntel start_port(portid_t pid) 2707ce8d5614SIntel { 270892d2703eSMichael Qiu int diag, need_check_link_status = -1; 2709ce8d5614SIntel portid_t pi; 271001817b10SBing Zhao portid_t p_pi = RTE_MAX_ETHPORTS; 271101817b10SBing Zhao portid_t pl[RTE_MAX_ETHPORTS]; 271201817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 271301817b10SBing Zhao uint16_t cnt_pi = 0; 271401817b10SBing Zhao uint16_t cfg_pi = 0; 271501817b10SBing Zhao int peer_pi; 2716ce8d5614SIntel queueid_t qi; 2717ce8d5614SIntel struct rte_port *port; 27181c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2719ce8d5614SIntel 27204468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 27214468635fSMichael Qiu return 0; 27224468635fSMichael Qiu 27237d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2724edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2725ce8d5614SIntel continue; 2726ce8d5614SIntel 272792d2703eSMichael Qiu need_check_link_status = 0; 2728ce8d5614SIntel port = &ports[pi]; 2729ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 2730ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 273161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is now not stopped\n", pi); 2732ce8d5614SIntel continue; 2733ce8d5614SIntel } 2734ce8d5614SIntel 2735ce8d5614SIntel if (port->need_reconfig > 0) { 2736655eae01SJie Wang struct rte_eth_conf dev_conf; 2737655eae01SJie Wang int k; 2738655eae01SJie Wang 2739ce8d5614SIntel port->need_reconfig = 0; 2740ce8d5614SIntel 27417ee3e944SVasily Philipov if (flow_isolate_all) { 27427ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 27437ee3e944SVasily Philipov if (ret) { 274461a3b0e5SAndrew Rybchenko fprintf(stderr, 274561a3b0e5SAndrew Rybchenko "Failed to apply isolated mode on port %d\n", 274661a3b0e5SAndrew Rybchenko pi); 27477ee3e944SVasily Philipov return -1; 27487ee3e944SVasily Philipov } 27497ee3e944SVasily Philipov } 2750b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 27515706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 275220a0286fSLiu Xiaofeng port->socket_id); 27531c69df45SOri Kam if (nb_hairpinq > 0 && 27541c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 275561a3b0e5SAndrew Rybchenko fprintf(stderr, 275661a3b0e5SAndrew Rybchenko "Port %d doesn't support hairpin queues\n", 275761a3b0e5SAndrew Rybchenko pi); 27581c69df45SOri Kam return -1; 27591c69df45SOri Kam } 2760*1bb4a528SFerruh Yigit 2761ce8d5614SIntel /* configure port */ 2762a550baf2SMin Hu (Connor) diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, 27631c69df45SOri Kam nb_txq + nb_hairpinq, 2764ce8d5614SIntel &(port->dev_conf)); 2765ce8d5614SIntel if (diag != 0) { 2766ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2767ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 276861a3b0e5SAndrew Rybchenko fprintf(stderr, 276961a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 277061a3b0e5SAndrew Rybchenko pi); 277161a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to configure 
port %d\n", 277261a3b0e5SAndrew Rybchenko pi); 2773ce8d5614SIntel /* try to reconfigure port next time */ 2774ce8d5614SIntel port->need_reconfig = 1; 2775148f963fSBruce Richardson return -1; 2776ce8d5614SIntel } 2777655eae01SJie Wang /* get device configuration*/ 2778655eae01SJie Wang if (0 != 2779655eae01SJie Wang eth_dev_conf_get_print_err(pi, &dev_conf)) { 2780655eae01SJie Wang fprintf(stderr, 2781655eae01SJie Wang "port %d can not get device configuration\n", 2782655eae01SJie Wang pi); 2783655eae01SJie Wang return -1; 2784655eae01SJie Wang } 2785655eae01SJie Wang /* Apply Rx offloads configuration */ 2786655eae01SJie Wang if (dev_conf.rxmode.offloads != 2787655eae01SJie Wang port->dev_conf.rxmode.offloads) { 2788655eae01SJie Wang port->dev_conf.rxmode.offloads |= 2789655eae01SJie Wang dev_conf.rxmode.offloads; 2790655eae01SJie Wang for (k = 0; 2791655eae01SJie Wang k < port->dev_info.max_rx_queues; 2792655eae01SJie Wang k++) 2793655eae01SJie Wang port->rx_conf[k].offloads |= 2794655eae01SJie Wang dev_conf.rxmode.offloads; 2795655eae01SJie Wang } 2796655eae01SJie Wang /* Apply Tx offloads configuration */ 2797655eae01SJie Wang if (dev_conf.txmode.offloads != 2798655eae01SJie Wang port->dev_conf.txmode.offloads) { 2799655eae01SJie Wang port->dev_conf.txmode.offloads |= 2800655eae01SJie Wang dev_conf.txmode.offloads; 2801655eae01SJie Wang for (k = 0; 2802655eae01SJie Wang k < port->dev_info.max_tx_queues; 2803655eae01SJie Wang k++) 2804655eae01SJie Wang port->tx_conf[k].offloads |= 2805655eae01SJie Wang dev_conf.txmode.offloads; 2806655eae01SJie Wang } 2807ce8d5614SIntel } 2808a550baf2SMin Hu (Connor) if (port->need_reconfig_queues > 0 && is_proc_primary()) { 2809ce8d5614SIntel port->need_reconfig_queues = 0; 2810ce8d5614SIntel /* setup tx queues */ 2811ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 2812b6ea6408SIntel if ((numa_support) && 2813b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2814b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2815d44f8a48SQi Zhang port->nb_tx_desc[qi], 2816d44f8a48SQi Zhang txring_numa[pi], 2817d44f8a48SQi Zhang &(port->tx_conf[qi])); 2818b6ea6408SIntel else 2819b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2820d44f8a48SQi Zhang port->nb_tx_desc[qi], 2821d44f8a48SQi Zhang port->socket_id, 2822d44f8a48SQi Zhang &(port->tx_conf[qi])); 2823b6ea6408SIntel 2824ce8d5614SIntel if (diag == 0) 2825ce8d5614SIntel continue; 2826ce8d5614SIntel 2827ce8d5614SIntel /* Fail to setup tx queue, return */ 2828ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2829ce8d5614SIntel RTE_PORT_HANDLING, 2830ce8d5614SIntel RTE_PORT_STOPPED) == 0) 283161a3b0e5SAndrew Rybchenko fprintf(stderr, 283261a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 283361a3b0e5SAndrew Rybchenko pi); 283461a3b0e5SAndrew Rybchenko fprintf(stderr, 283561a3b0e5SAndrew Rybchenko "Fail to configure port %d tx queues\n", 2836d44f8a48SQi Zhang pi); 2837ce8d5614SIntel /* try to reconfigure queues next time */ 2838ce8d5614SIntel port->need_reconfig_queues = 1; 2839148f963fSBruce Richardson return -1; 2840ce8d5614SIntel } 2841ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2842d44f8a48SQi Zhang /* setup rx queues */ 2843b6ea6408SIntel if ((numa_support) && 2844b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2845b6ea6408SIntel struct rte_mempool * mp = 284626cbb419SViacheslav Ovsiienko mbuf_pool_find 284726cbb419SViacheslav Ovsiienko (rxring_numa[pi], 0); 2848b6ea6408SIntel if (mp == NULL) { 284961a3b0e5SAndrew Rybchenko fprintf(stderr, 285061a3b0e5SAndrew 
Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 2851b6ea6408SIntel rxring_numa[pi]); 2852148f963fSBruce Richardson return -1; 2853b6ea6408SIntel } 2854b6ea6408SIntel 28552befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2856d4930794SFerruh Yigit port->nb_rx_desc[qi], 2857d44f8a48SQi Zhang rxring_numa[pi], 2858d44f8a48SQi Zhang &(port->rx_conf[qi]), 2859d44f8a48SQi Zhang mp); 28601e1d6bddSBernard Iremonger } else { 28611e1d6bddSBernard Iremonger struct rte_mempool *mp = 286226cbb419SViacheslav Ovsiienko mbuf_pool_find 286326cbb419SViacheslav Ovsiienko (port->socket_id, 0); 28641e1d6bddSBernard Iremonger if (mp == NULL) { 286561a3b0e5SAndrew Rybchenko fprintf(stderr, 286661a3b0e5SAndrew Rybchenko "Failed to setup RX queue: No mempool allocation on the socket %d\n", 28671e1d6bddSBernard Iremonger port->socket_id); 28681e1d6bddSBernard Iremonger return -1; 2869b6ea6408SIntel } 28702befc67fSViacheslav Ovsiienko diag = rx_queue_setup(pi, qi, 2871d4930794SFerruh Yigit port->nb_rx_desc[qi], 2872d44f8a48SQi Zhang port->socket_id, 2873d44f8a48SQi Zhang &(port->rx_conf[qi]), 2874d44f8a48SQi Zhang mp); 28751e1d6bddSBernard Iremonger } 2876ce8d5614SIntel if (diag == 0) 2877ce8d5614SIntel continue; 2878ce8d5614SIntel 2879ce8d5614SIntel /* Fail to setup rx queue, return */ 2880ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2881ce8d5614SIntel RTE_PORT_HANDLING, 2882ce8d5614SIntel RTE_PORT_STOPPED) == 0) 288361a3b0e5SAndrew Rybchenko fprintf(stderr, 288461a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 288561a3b0e5SAndrew Rybchenko pi); 288661a3b0e5SAndrew Rybchenko fprintf(stderr, 288761a3b0e5SAndrew Rybchenko "Fail to configure port %d rx queues\n", 2888d44f8a48SQi Zhang pi); 2889ce8d5614SIntel /* try to reconfigure queues next time */ 2890ce8d5614SIntel port->need_reconfig_queues = 1; 2891148f963fSBruce Richardson return -1; 2892ce8d5614SIntel } 28931c69df45SOri Kam /* setup hairpin queues */ 289401817b10SBing Zhao if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) 28951c69df45SOri Kam return -1; 2896ce8d5614SIntel } 2897b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 2898b0a9354aSPavan Nikhilesh if (clear_ptypes) { 2899b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 2900b0a9354aSPavan Nikhilesh NULL, 0); 2901b0a9354aSPavan Nikhilesh if (diag < 0) 290261a3b0e5SAndrew Rybchenko fprintf(stderr, 2903b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 2904b0a9354aSPavan Nikhilesh pi); 2905b0a9354aSPavan Nikhilesh } 2906b0a9354aSPavan Nikhilesh 290701817b10SBing Zhao p_pi = pi; 290801817b10SBing Zhao cnt_pi++; 290901817b10SBing Zhao 2910ce8d5614SIntel /* start port */ 2911a550baf2SMin Hu (Connor) diag = eth_dev_start_mp(pi); 291252f2c6f2SAndrew Rybchenko if (diag < 0) { 291361a3b0e5SAndrew Rybchenko fprintf(stderr, "Fail to start port %d: %s\n", 291461a3b0e5SAndrew Rybchenko pi, rte_strerror(-diag)); 2915ce8d5614SIntel 2916ce8d5614SIntel /* Fail to start port, set it back to stopped */ 2917ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2918ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 291961a3b0e5SAndrew Rybchenko fprintf(stderr, 292061a3b0e5SAndrew Rybchenko "Port %d can not be set back to stopped\n", 292161a3b0e5SAndrew Rybchenko pi); 2922ce8d5614SIntel continue; 2923ce8d5614SIntel } 2924ce8d5614SIntel 2925ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2926ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 292761a3b0e5SAndrew
Rybchenko fprintf(stderr, "Port %d can not be set into started\n", 292861a3b0e5SAndrew Rybchenko pi); 2929ce8d5614SIntel 29305ffc4a2aSYuying Zhang if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0) 2931c2c4f87bSAman Deep Singh printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, 2932a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&port->eth_addr)); 2933d8c89163SZijie Pan 2934ce8d5614SIntel /* at least one port started, need checking link status */ 2935ce8d5614SIntel need_check_link_status = 1; 293601817b10SBing Zhao 293701817b10SBing Zhao pl[cfg_pi++] = pi; 2938ce8d5614SIntel } 2939ce8d5614SIntel 294092d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 2941edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 294292d2703eSMichael Qiu else if (need_check_link_status == 0) 294361a3b0e5SAndrew Rybchenko fprintf(stderr, "Please stop the ports first\n"); 2944ce8d5614SIntel 294501817b10SBing Zhao if (hairpin_mode & 0xf) { 294601817b10SBing Zhao uint16_t i; 294701817b10SBing Zhao int j; 294801817b10SBing Zhao 294901817b10SBing Zhao /* bind all started hairpin ports */ 295001817b10SBing Zhao for (i = 0; i < cfg_pi; i++) { 295101817b10SBing Zhao pi = pl[i]; 295201817b10SBing Zhao /* bind current Tx to all peer Rx */ 295301817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 295401817b10SBing Zhao RTE_MAX_ETHPORTS, 1); 295501817b10SBing Zhao if (peer_pi < 0) 295601817b10SBing Zhao return peer_pi; 295701817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 295801817b10SBing Zhao if (!port_is_started(peer_pl[j])) 295901817b10SBing Zhao continue; 296001817b10SBing Zhao diag = rte_eth_hairpin_bind(pi, peer_pl[j]); 296101817b10SBing Zhao if (diag < 0) { 296261a3b0e5SAndrew Rybchenko fprintf(stderr, 296361a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 296401817b10SBing Zhao pi, peer_pl[j], 296501817b10SBing Zhao rte_strerror(-diag)); 296601817b10SBing Zhao return -1; 296701817b10SBing Zhao } 296801817b10SBing Zhao } 296901817b10SBing Zhao /* bind all peer Tx to current Rx */ 297001817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 297101817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 297201817b10SBing Zhao if (peer_pi < 0) 297301817b10SBing Zhao return peer_pi; 297401817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 297501817b10SBing Zhao if (!port_is_started(peer_pl[j])) 297601817b10SBing Zhao continue; 297701817b10SBing Zhao diag = rte_eth_hairpin_bind(peer_pl[j], pi); 297801817b10SBing Zhao if (diag < 0) { 297961a3b0e5SAndrew Rybchenko fprintf(stderr, 298061a3b0e5SAndrew Rybchenko "Error during binding hairpin Tx port %u to %u: %s\n", 298101817b10SBing Zhao peer_pl[j], pi, 298201817b10SBing Zhao rte_strerror(-diag)); 298301817b10SBing Zhao return -1; 298401817b10SBing Zhao } 298501817b10SBing Zhao } 298601817b10SBing Zhao } 298701817b10SBing Zhao } 298801817b10SBing Zhao 298963b72657SIvan Ilchenko fill_xstats_display_info_for_port(pid); 299063b72657SIvan Ilchenko 2991ce8d5614SIntel printf("Done\n"); 2992148f963fSBruce Richardson return 0; 2993ce8d5614SIntel } 2994ce8d5614SIntel 2995ce8d5614SIntel void 2996ce8d5614SIntel stop_port(portid_t pid) 2997ce8d5614SIntel { 2998ce8d5614SIntel portid_t pi; 2999ce8d5614SIntel struct rte_port *port; 3000ce8d5614SIntel int need_check_link_status = 0; 300101817b10SBing Zhao portid_t peer_pl[RTE_MAX_ETHPORTS]; 300201817b10SBing Zhao int peer_pi; 3003ce8d5614SIntel 30044468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 30054468635fSMichael Qiu return; 
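/*
 * Minimal lifecycle sketch (illustrative; error handling omitted)
 * showing where stop_port() sits among the helpers in this file:
 *
 *     start_port(pid);              // configure queues and start
 *     start_packet_forwarding(0);   // launch forwarding lcores
 *     stop_packet_forwarding();     // join lcores and print stats
 *     stop_port(pid);               // stop the device, keep its config
 *     close_port(pid);              // release the device resources
 */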
30064468635fSMichael Qiu 3007ce8d5614SIntel printf("Stopping ports...\n"); 3008ce8d5614SIntel 30097d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 30104468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3011ce8d5614SIntel continue; 3012ce8d5614SIntel 3013a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 301461a3b0e5SAndrew Rybchenko fprintf(stderr, 301561a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 301661a3b0e5SAndrew Rybchenko pi); 3017a8ef3e3aSBernard Iremonger continue; 3018a8ef3e3aSBernard Iremonger } 3019a8ef3e3aSBernard Iremonger 30200e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 302161a3b0e5SAndrew Rybchenko fprintf(stderr, 302261a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 302361a3b0e5SAndrew Rybchenko pi); 30240e545d30SBernard Iremonger continue; 30250e545d30SBernard Iremonger } 30260e545d30SBernard Iremonger 3027ce8d5614SIntel port = &ports[pi]; 3028ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 3029ce8d5614SIntel RTE_PORT_HANDLING) == 0) 3030ce8d5614SIntel continue; 3031ce8d5614SIntel 303201817b10SBing Zhao if (hairpin_mode & 0xf) { 303301817b10SBing Zhao int j; 303401817b10SBing Zhao 303501817b10SBing Zhao rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); 303601817b10SBing Zhao /* unbind all peer Tx from current Rx */ 303701817b10SBing Zhao peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, 303801817b10SBing Zhao RTE_MAX_ETHPORTS, 0); 303901817b10SBing Zhao if (peer_pi < 0) 304001817b10SBing Zhao continue; 304101817b10SBing Zhao for (j = 0; j < peer_pi; j++) { 304201817b10SBing Zhao if (!port_is_started(peer_pl[j])) 304301817b10SBing Zhao continue; 304401817b10SBing Zhao rte_eth_hairpin_unbind(peer_pl[j], pi); 304501817b10SBing Zhao } 304601817b10SBing Zhao } 304701817b10SBing Zhao 30480f93edbfSGregory Etelson if (port->flow_list) 30490f93edbfSGregory Etelson port_flow_flush(pi); 30500f93edbfSGregory Etelson 3051a550baf2SMin Hu (Connor) if (eth_dev_stop_mp(pi) != 0) 3052e62c5a12SIvan Ilchenko RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", 3053e62c5a12SIvan Ilchenko pi); 3054ce8d5614SIntel 3055ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 3056ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 305761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d can not be set into stopped\n", 305861a3b0e5SAndrew Rybchenko pi); 3059ce8d5614SIntel need_check_link_status = 1; 3060ce8d5614SIntel } 3061bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 3062edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 3063ce8d5614SIntel 3064ce8d5614SIntel printf("Done\n"); 3065ce8d5614SIntel } 3066ce8d5614SIntel 3067ce6959bfSWisam Jaddo static void 30684f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total) 3069ce6959bfSWisam Jaddo { 30704f1de450SThomas Monjalon portid_t i; 30714f1de450SThomas Monjalon portid_t new_total = 0; 3072ce6959bfSWisam Jaddo 30734f1de450SThomas Monjalon for (i = 0; i < *total; i++) 30744f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 30754f1de450SThomas Monjalon array[new_total] = array[i]; 30764f1de450SThomas Monjalon new_total++; 3077ce6959bfSWisam Jaddo } 30784f1de450SThomas Monjalon *total = new_total; 30794f1de450SThomas Monjalon } 30804f1de450SThomas Monjalon 30814f1de450SThomas Monjalon static void 30824f1de450SThomas Monjalon remove_invalid_ports(void) 30834f1de450SThomas Monjalon { 30844f1de450SThomas 
Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 30854f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 30864f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 3087ce6959bfSWisam Jaddo } 3088ce6959bfSWisam Jaddo 3089ce8d5614SIntel void 3090ce8d5614SIntel close_port(portid_t pid) 3091ce8d5614SIntel { 3092ce8d5614SIntel portid_t pi; 3093ce8d5614SIntel struct rte_port *port; 3094ce8d5614SIntel 30954468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 30964468635fSMichael Qiu return; 30974468635fSMichael Qiu 3098ce8d5614SIntel printf("Closing ports...\n"); 3099ce8d5614SIntel 31007d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 31014468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 3102ce8d5614SIntel continue; 3103ce8d5614SIntel 3104a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 310561a3b0e5SAndrew Rybchenko fprintf(stderr, 310661a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 310761a3b0e5SAndrew Rybchenko pi); 3108a8ef3e3aSBernard Iremonger continue; 3109a8ef3e3aSBernard Iremonger } 3110a8ef3e3aSBernard Iremonger 31110e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 311261a3b0e5SAndrew Rybchenko fprintf(stderr, 311361a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 311461a3b0e5SAndrew Rybchenko pi); 31150e545d30SBernard Iremonger continue; 31160e545d30SBernard Iremonger } 31170e545d30SBernard Iremonger 3118ce8d5614SIntel port = &ports[pi]; 3119ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 3120d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 312161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %d is already closed\n", pi); 3122d4e8ad64SMichael Qiu continue; 3123d4e8ad64SMichael Qiu } 3124d4e8ad64SMichael Qiu 3125a550baf2SMin Hu (Connor) if (is_proc_primary()) { 3126938a184aSAdrien Mazarguil port_flow_flush(pi); 3127ce8d5614SIntel rte_eth_dev_close(pi); 3128ce8d5614SIntel } 312963b72657SIvan Ilchenko 313063b72657SIvan Ilchenko free_xstats_display_info(pi); 3131a550baf2SMin Hu (Connor) } 3132ce8d5614SIntel 313385c6571cSThomas Monjalon remove_invalid_ports(); 3134ce8d5614SIntel printf("Done\n"); 3135ce8d5614SIntel } 3136ce8d5614SIntel 3137edab33b1STetsuya Mukawa void 313897f1e196SWei Dai reset_port(portid_t pid) 313997f1e196SWei Dai { 314097f1e196SWei Dai int diag; 314197f1e196SWei Dai portid_t pi; 314297f1e196SWei Dai struct rte_port *port; 314397f1e196SWei Dai 314497f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 314597f1e196SWei Dai return; 314697f1e196SWei Dai 31471cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 31481cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 314961a3b0e5SAndrew Rybchenko fprintf(stderr, 315061a3b0e5SAndrew Rybchenko "Can not reset port(s), please stop port(s) first.\n"); 31511cde1b9aSShougang Wang return; 31521cde1b9aSShougang Wang } 31531cde1b9aSShougang Wang 315497f1e196SWei Dai printf("Resetting ports...\n"); 315597f1e196SWei Dai 315697f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 315797f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 315897f1e196SWei Dai continue; 315997f1e196SWei Dai 316097f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 316161a3b0e5SAndrew Rybchenko fprintf(stderr, 316261a3b0e5SAndrew Rybchenko "Please remove port %d from forwarding configuration.\n", 316361a3b0e5SAndrew Rybchenko pi); 316497f1e196SWei Dai continue; 316597f1e196SWei Dai } 
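/*
 * Recovery sketch (illustrative): rte_eth_dev_reset() below only asks
 * the driver to reset the device; the need_reconfig flags set on
 * success make the next start_port() call redo the whole port and
 * queue configuration:
 *
 *     reset_port(pid);    // reset and mark for reconfiguration
 *     start_port(pid);    // reconfigure and restart
 */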
316697f1e196SWei Dai 316797f1e196SWei Dai if (port_is_bonding_slave(pi)) { 316861a3b0e5SAndrew Rybchenko fprintf(stderr, 316961a3b0e5SAndrew Rybchenko "Please remove port %d from bonded device.\n", 317097f1e196SWei Dai pi); 317197f1e196SWei Dai continue; 317297f1e196SWei Dai } 317397f1e196SWei Dai 317497f1e196SWei Dai diag = rte_eth_dev_reset(pi); 317597f1e196SWei Dai if (diag == 0) { 317697f1e196SWei Dai port = &ports[pi]; 317797f1e196SWei Dai port->need_reconfig = 1; 317897f1e196SWei Dai port->need_reconfig_queues = 1; 317997f1e196SWei Dai } else { 318061a3b0e5SAndrew Rybchenko fprintf(stderr, "Failed to reset port %d. diag=%d\n", 318161a3b0e5SAndrew Rybchenko pi, diag); 318297f1e196SWei Dai } 318397f1e196SWei Dai } 318497f1e196SWei Dai 318597f1e196SWei Dai printf("Done\n"); 318697f1e196SWei Dai } 318797f1e196SWei Dai 318897f1e196SWei Dai void 3189edab33b1STetsuya Mukawa attach_port(char *identifier) 3190ce8d5614SIntel { 31914f1ed78eSThomas Monjalon portid_t pi; 3192c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 3193ce8d5614SIntel 3194edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 3195edab33b1STetsuya Mukawa 3196edab33b1STetsuya Mukawa if (identifier == NULL) { 319761a3b0e5SAndrew Rybchenko fprintf(stderr, "Invalid parameters are specified\n"); 3198edab33b1STetsuya Mukawa return; 3199ce8d5614SIntel } 3200ce8d5614SIntel 320175b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 3202c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 3203edab33b1STetsuya Mukawa return; 3204c9cce428SThomas Monjalon } 3205c9cce428SThomas Monjalon 32064f1ed78eSThomas Monjalon /* first attach mode: event */ 32074f1ed78eSThomas Monjalon if (setup_on_probe_event) { 32084f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 32094f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 32104f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 32114f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 32124f1ed78eSThomas Monjalon setup_attached_port(pi); 32134f1ed78eSThomas Monjalon return; 32144f1ed78eSThomas Monjalon } 32154f1ed78eSThomas Monjalon 32164f1ed78eSThomas Monjalon /* second attach mode: iterator */ 321786fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 32184f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 321986fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 322086fa5de1SThomas Monjalon continue; /* port was already attached before */ 3221c9cce428SThomas Monjalon setup_attached_port(pi); 3222c9cce428SThomas Monjalon } 322386fa5de1SThomas Monjalon } 3224c9cce428SThomas Monjalon 3225c9cce428SThomas Monjalon static void 3226c9cce428SThomas Monjalon setup_attached_port(portid_t pi) 3227c9cce428SThomas Monjalon { 3228c9cce428SThomas Monjalon unsigned int socket_id; 322934fc1051SIvan Ilchenko int ret; 3230edab33b1STetsuya Mukawa 3231931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 323229841336SPhil Yang /* if socket_id is invalid, set to the first available socket. 
*/ 3233931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 323429841336SPhil Yang socket_id = socket_ids[0]; 3235931126baSBernard Iremonger reconfig(pi, socket_id); 323634fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 323734fc1051SIvan Ilchenko if (ret != 0) 323861a3b0e5SAndrew Rybchenko fprintf(stderr, 323961a3b0e5SAndrew Rybchenko "Error during enabling promiscuous mode for port %u: %s - ignore\n", 324034fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 3241edab33b1STetsuya Mukawa 32424f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 32434f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 32444f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 32454f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 3246edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 3247edab33b1STetsuya Mukawa 3248edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 3249edab33b1STetsuya Mukawa printf("Done\n"); 3250edab33b1STetsuya Mukawa } 3251edab33b1STetsuya Mukawa 32520654d4a8SThomas Monjalon static void 32530654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 32545f4ec54fSChen Jing D(Mark) { 3255f8e5baa2SThomas Monjalon portid_t sibling; 3256f8e5baa2SThomas Monjalon 3257f8e5baa2SThomas Monjalon if (dev == NULL) { 325861a3b0e5SAndrew Rybchenko fprintf(stderr, "Device already removed\n"); 3259f8e5baa2SThomas Monjalon return; 3260f8e5baa2SThomas Monjalon } 3261f8e5baa2SThomas Monjalon 32620654d4a8SThomas Monjalon printf("Removing a device...\n"); 3263938a184aSAdrien Mazarguil 32642a449871SThomas Monjalon RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 32652a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 32662a449871SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_STOPPED) { 326761a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 326861a3b0e5SAndrew Rybchenko sibling); 32692a449871SThomas Monjalon return; 32702a449871SThomas Monjalon } 32712a449871SThomas Monjalon port_flow_flush(sibling); 32722a449871SThomas Monjalon } 32732a449871SThomas Monjalon } 32742a449871SThomas Monjalon 327575b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 3276f8e5baa2SThomas Monjalon TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); 3277edab33b1STetsuya Mukawa return; 32783070419eSGaetan Rivet } 32794f1de450SThomas Monjalon remove_invalid_ports(); 328003ce2c53SMatan Azrad 32810654d4a8SThomas Monjalon printf("Device is detached\n"); 3282f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 3283edab33b1STetsuya Mukawa printf("Done\n"); 3284edab33b1STetsuya Mukawa return; 32855f4ec54fSChen Jing D(Mark) } 32865f4ec54fSChen Jing D(Mark) 3287af75078fSIntel void 32880654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 32890654d4a8SThomas Monjalon { 32900a0821bcSPaulis Gributs int ret; 32910a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 32920a0821bcSPaulis Gributs 32930654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 32940654d4a8SThomas Monjalon return; 32950654d4a8SThomas Monjalon 32960654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 32970654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 329861a3b0e5SAndrew Rybchenko fprintf(stderr, "Port not stopped\n"); 32990654d4a8SThomas Monjalon return; 33000654d4a8SThomas Monjalon } 330161a3b0e5SAndrew Rybchenko fprintf(stderr, "Port was not closed\n"); 33020654d4a8SThomas Monjalon } 33030654d4a8SThomas Monjalon 33040a0821bcSPaulis Gributs 
ret = eth_dev_info_get_print_err(port_id, &dev_info); 33050a0821bcSPaulis Gributs if (ret != 0) { 33060a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 33070a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 33080a0821bcSPaulis Gributs port_id); 33090a0821bcSPaulis Gributs return; 33100a0821bcSPaulis Gributs } 33110a0821bcSPaulis Gributs detach_device(dev_info.device); 33120654d4a8SThomas Monjalon } 33130654d4a8SThomas Monjalon 33140654d4a8SThomas Monjalon void 33155edee5f6SThomas Monjalon detach_devargs(char *identifier) 331655e51c96SNithin Dabilpuram { 331755e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 331855e51c96SNithin Dabilpuram struct rte_devargs da; 331955e51c96SNithin Dabilpuram portid_t port_id; 332055e51c96SNithin Dabilpuram 332155e51c96SNithin Dabilpuram printf("Removing a device...\n"); 332255e51c96SNithin Dabilpuram 332355e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 332455e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 332561a3b0e5SAndrew Rybchenko fprintf(stderr, "cannot parse identifier\n"); 332655e51c96SNithin Dabilpuram return; 332755e51c96SNithin Dabilpuram } 332855e51c96SNithin Dabilpuram 332955e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 333055e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 333155e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 333261a3b0e5SAndrew Rybchenko fprintf(stderr, "Port %u not stopped\n", 333361a3b0e5SAndrew Rybchenko port_id); 3334149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 333564051bb1SXueming Li rte_devargs_reset(&da); 333655e51c96SNithin Dabilpuram return; 333755e51c96SNithin Dabilpuram } 333855e51c96SNithin Dabilpuram port_flow_flush(port_id); 333955e51c96SNithin Dabilpuram } 334055e51c96SNithin Dabilpuram } 334155e51c96SNithin Dabilpuram 334255e51c96SNithin Dabilpuram if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { 334355e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 334455e51c96SNithin Dabilpuram da.name, da.bus->name); 334564051bb1SXueming Li rte_devargs_reset(&da); 334655e51c96SNithin Dabilpuram return; 334755e51c96SNithin Dabilpuram } 334855e51c96SNithin Dabilpuram 334955e51c96SNithin Dabilpuram remove_invalid_ports(); 335055e51c96SNithin Dabilpuram 335155e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 335255e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 335355e51c96SNithin Dabilpuram printf("Done\n"); 335464051bb1SXueming Li rte_devargs_reset(&da); 335555e51c96SNithin Dabilpuram } 335655e51c96SNithin Dabilpuram 335755e51c96SNithin Dabilpuram void 3358af75078fSIntel pmd_test_exit(void) 3359af75078fSIntel { 3360af75078fSIntel portid_t pt_id; 336126cbb419SViacheslav Ovsiienko unsigned int i; 3362fb73e096SJeff Guo int ret; 3363af75078fSIntel 33648210ec25SPablo de Lara if (test_done == 0) 33658210ec25SPablo de Lara stop_packet_forwarding(); 33668210ec25SPablo de Lara 3367761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS 336826cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 33693a0968c8SShahaf Shuler if (mempools[i]) { 33703a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 33713a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 33723a0968c8SShahaf Shuler NULL); 33733a0968c8SShahaf Shuler } 33743a0968c8SShahaf Shuler } 3375761f7ae1SJie Zhou #endif 3376d3a274ceSZhihong Wang if (ports != NULL) { 3377d3a274ceSZhihong 
Wang no_link_check = 1; 33787d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 337908fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 3380af75078fSIntel fflush(stdout); 3381d3a274ceSZhihong Wang stop_port(pt_id); 338208fd782bSCristian Dumitrescu } 338308fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 338408fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 338508fd782bSCristian Dumitrescu fflush(stdout); 3386d3a274ceSZhihong Wang close_port(pt_id); 3387af75078fSIntel } 3388d3a274ceSZhihong Wang } 3389fb73e096SJeff Guo 3390fb73e096SJeff Guo if (hot_plug) { 3391fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 33922049c511SJeff Guo if (ret) { 3393fb73e096SJeff Guo RTE_LOG(ERR, EAL, 3394fb73e096SJeff Guo "fail to stop device event monitor."); 33952049c511SJeff Guo return; 33962049c511SJeff Guo } 3397fb73e096SJeff Guo 33982049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 3399cc1bf307SJeff Guo dev_event_callback, NULL); 34002049c511SJeff Guo if (ret < 0) { 3401fb73e096SJeff Guo RTE_LOG(ERR, EAL, 34022049c511SJeff Guo "fail to unregister device event callback.\n"); 34032049c511SJeff Guo return; 34042049c511SJeff Guo } 34052049c511SJeff Guo 34062049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 34072049c511SJeff Guo if (ret) { 34082049c511SJeff Guo RTE_LOG(ERR, EAL, 34092049c511SJeff Guo "fail to disable hotplug handling.\n"); 34102049c511SJeff Guo return; 34112049c511SJeff Guo } 3412fb73e096SJeff Guo } 341326cbb419SViacheslav Ovsiienko for (i = 0 ; i < RTE_DIM(mempools) ; i++) { 3414401b744dSShahaf Shuler if (mempools[i]) 3415a550baf2SMin Hu (Connor) mempool_free_mp(mempools[i]); 3416401b744dSShahaf Shuler } 341763b72657SIvan Ilchenko free(xstats_display); 3418fb73e096SJeff Guo 3419d3a274ceSZhihong Wang printf("\nBye...\n"); 3420af75078fSIntel } 3421af75078fSIntel 3422af75078fSIntel typedef void (*cmd_func_t)(void); 3423af75078fSIntel struct pmd_test_command { 3424af75078fSIntel const char *cmd_name; 3425af75078fSIntel cmd_func_t cmd_func; 3426af75078fSIntel }; 3427af75078fSIntel 3428ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 3429af75078fSIntel static void 3430edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 3431af75078fSIntel { 3432ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 3433ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3434f8244c63SZhiyong Yang portid_t portid; 3435f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3436ce8d5614SIntel struct rte_eth_link link; 3437e661a08bSIgor Romanov int ret; 3438ba5509a6SIvan Dyukov char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 3439ce8d5614SIntel 3440ce8d5614SIntel printf("Checking link statuses...\n"); 3441ce8d5614SIntel fflush(stdout); 3442ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3443ce8d5614SIntel all_ports_up = 1; 34447d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3445ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3446ce8d5614SIntel continue; 3447ce8d5614SIntel memset(&link, 0, sizeof(link)); 3448e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3449e661a08bSIgor Romanov if (ret < 0) { 3450e661a08bSIgor Romanov all_ports_up = 0; 3451e661a08bSIgor Romanov if (print_flag == 1) 345261a3b0e5SAndrew Rybchenko fprintf(stderr, 345361a3b0e5SAndrew Rybchenko "Port %u link get failed: %s\n", 3454e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3455e661a08bSIgor Romanov continue; 3456e661a08bSIgor Romanov } 
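/*
 * check_all_ports_link_status() polls for at most MAX_CHECK_TIME (90)
 * rounds spaced CHECK_INTERVAL (100 ms) apart, i.e. 9 seconds in
 * total. A stand-alone single-port variant could look like this
 * (illustrative sketch, not used by testpmd):
 *
 *     struct rte_eth_link link;
 *     int n;
 *
 *     for (n = 0; n < 90; n++) {
 *         if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *             link.link_status == ETH_LINK_UP)
 *             break;
 *         rte_delay_ms(100);
 *     }
 */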
3457ce8d5614SIntel /* print link status if flag set */ 3458ce8d5614SIntel if (print_flag == 1) { 3459ba5509a6SIvan Dyukov rte_eth_link_to_str(link_status, 3460ba5509a6SIvan Dyukov sizeof(link_status), &link); 3461ba5509a6SIvan Dyukov printf("Port %d %s\n", portid, link_status); 3462ce8d5614SIntel continue; 3463ce8d5614SIntel } 3464ce8d5614SIntel /* clear all_ports_up flag if any link down */ 346509419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 3466ce8d5614SIntel all_ports_up = 0; 3467ce8d5614SIntel break; 3468ce8d5614SIntel } 3469ce8d5614SIntel } 3470ce8d5614SIntel /* after finally printing all link status, get out */ 3471ce8d5614SIntel if (print_flag == 1) 3472ce8d5614SIntel break; 3473ce8d5614SIntel 3474ce8d5614SIntel if (all_ports_up == 0) { 3475ce8d5614SIntel fflush(stdout); 3476ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 3477ce8d5614SIntel } 3478ce8d5614SIntel 3479ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 3480ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 3481ce8d5614SIntel print_flag = 1; 3482ce8d5614SIntel } 34838ea656f8SGaetan Rivet 34848ea656f8SGaetan Rivet if (lsc_interrupt) 34858ea656f8SGaetan Rivet break; 3486ce8d5614SIntel } 3487af75078fSIntel } 3488af75078fSIntel 3489284c908cSGaetan Rivet static void 3490cc1bf307SJeff Guo rmv_port_callback(void *arg) 3491284c908cSGaetan Rivet { 34923b97888aSMatan Azrad int need_to_start = 0; 34930da2a62bSMatan Azrad int org_no_link_check = no_link_check; 349428caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 34950a0821bcSPaulis Gributs struct rte_eth_dev_info dev_info; 34960a0821bcSPaulis Gributs int ret; 3497284c908cSGaetan Rivet 3498284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3499284c908cSGaetan Rivet 35003b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 35013b97888aSMatan Azrad need_to_start = 1; 35023b97888aSMatan Azrad stop_packet_forwarding(); 35033b97888aSMatan Azrad } 35040da2a62bSMatan Azrad no_link_check = 1; 3505284c908cSGaetan Rivet stop_port(port_id); 35060da2a62bSMatan Azrad no_link_check = org_no_link_check; 35070654d4a8SThomas Monjalon 35080a0821bcSPaulis Gributs ret = eth_dev_info_get_print_err(port_id, &dev_info); 35090a0821bcSPaulis Gributs if (ret != 0) 35100a0821bcSPaulis Gributs TESTPMD_LOG(ERR, 35110a0821bcSPaulis Gributs "Failed to get device info for port %d, not detaching\n", 35120a0821bcSPaulis Gributs port_id); 3513e1d38504SPaulis Gributs else { 3514e1d38504SPaulis Gributs struct rte_device *device = dev_info.device; 3515e1d38504SPaulis Gributs close_port(port_id); 3516e1d38504SPaulis Gributs detach_device(device); /* might be already removed or have more ports */ 3517e1d38504SPaulis Gributs } 35183b97888aSMatan Azrad if (need_to_start) 35193b97888aSMatan Azrad start_packet_forwarding(0); 3520284c908cSGaetan Rivet } 3521284c908cSGaetan Rivet 352276ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3523d6af1a13SBernard Iremonger static int 3524f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3525d6af1a13SBernard Iremonger void *ret_param) 352676ad4a2dSGaetan Rivet { 352776ad4a2dSGaetan Rivet RTE_SET_USED(param); 3528d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 352976ad4a2dSGaetan Rivet 353076ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 353161a3b0e5SAndrew Rybchenko fprintf(stderr, 353261a3b0e5SAndrew Rybchenko "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 353376ad4a2dSGaetan Rivet port_id, __func__, type); 
353476ad4a2dSGaetan Rivet fflush(stderr); 35353af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3536f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 353797b5d8b5SThomas Monjalon eth_event_desc[type]); 353876ad4a2dSGaetan Rivet fflush(stdout); 353976ad4a2dSGaetan Rivet } 3540284c908cSGaetan Rivet 3541284c908cSGaetan Rivet switch (type) { 35424f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 35434f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 35444f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 35454f1ed78eSThomas Monjalon break; 3546284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 35474f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 35484f1ed78eSThomas Monjalon break; 3549284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3550cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 355161a3b0e5SAndrew Rybchenko fprintf(stderr, 355261a3b0e5SAndrew Rybchenko "Could not set up deferred device removal\n"); 3553284c908cSGaetan Rivet break; 355485c6571cSThomas Monjalon case RTE_ETH_EVENT_DESTROY: 355585c6571cSThomas Monjalon ports[port_id].port_status = RTE_PORT_CLOSED; 355685c6571cSThomas Monjalon printf("Port %u is closed\n", port_id); 355785c6571cSThomas Monjalon break; 3558284c908cSGaetan Rivet default: 3559284c908cSGaetan Rivet break; 3560284c908cSGaetan Rivet } 3561d6af1a13SBernard Iremonger return 0; 356276ad4a2dSGaetan Rivet } 356376ad4a2dSGaetan Rivet 356497b5d8b5SThomas Monjalon static int 356597b5d8b5SThomas Monjalon register_eth_event_callback(void) 356697b5d8b5SThomas Monjalon { 356797b5d8b5SThomas Monjalon int ret; 356897b5d8b5SThomas Monjalon enum rte_eth_event_type event; 356997b5d8b5SThomas Monjalon 357097b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 357197b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 357297b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 357397b5d8b5SThomas Monjalon event, 357497b5d8b5SThomas Monjalon eth_event_callback, 357597b5d8b5SThomas Monjalon NULL); 357697b5d8b5SThomas Monjalon if (ret != 0) { 357797b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 357897b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 357997b5d8b5SThomas Monjalon return -1; 358097b5d8b5SThomas Monjalon } 358197b5d8b5SThomas Monjalon } 358297b5d8b5SThomas Monjalon 358397b5d8b5SThomas Monjalon return 0; 358497b5d8b5SThomas Monjalon } 358597b5d8b5SThomas Monjalon 3586fb73e096SJeff Guo /* This function is used by the interrupt thread */ 3587fb73e096SJeff Guo static void 3588cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type, 3589fb73e096SJeff Guo __rte_unused void *arg) 3590fb73e096SJeff Guo { 35912049c511SJeff Guo uint16_t port_id; 35922049c511SJeff Guo int ret; 35932049c511SJeff Guo 3594fb73e096SJeff Guo if (type >= RTE_DEV_EVENT_MAX) { 3595fb73e096SJeff Guo fprintf(stderr, "%s called upon invalid event %d\n", 3596fb73e096SJeff Guo __func__, type); 3597fb73e096SJeff Guo fflush(stderr); 3598fb73e096SJeff Guo } 3599fb73e096SJeff Guo 3600fb73e096SJeff Guo switch (type) { 3601fb73e096SJeff Guo case RTE_DEV_EVENT_REMOVE: 3602cc1bf307SJeff Guo RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", 3603fb73e096SJeff Guo device_name); 36042049c511SJeff Guo ret = rte_eth_dev_get_port_by_name(device_name, &port_id); 36052049c511SJeff Guo if (ret) { 36062049c511SJeff Guo RTE_LOG(ERR, EAL, "can not get port by device %s!\n", 36072049c511SJeff Guo 

/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
	     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked from the EAL
		 * interrupt callback, and an interrupt callback has to
		 * finish before it can be unregistered when detaching the
		 * device, finish quickly here and detach the device via a
		 * deferred removal instead. This is a workaround: once
		 * device detaching is moved into the EAL, the deferred
		 * removal can be dropped.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: after kernel driver binding is finished,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}
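
/*
 * dev_event_callback() only fires once the EAL device-event monitor is
 * running and the callback has been registered; main() below does this
 * when hot-plug is enabled. The essential sequence is (sketch, error
 * handling omitted; a NULL device name subscribes to all devices):
 *
 *	rte_dev_hotplug_handle_enable();
 *	rte_dev_event_monitor_start();
 *	rte_dev_event_callback_register(NULL, dev_event_callback, NULL);
 */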

static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
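
/*
 * The per-queue state prepared above is consumed when the queues are
 * actually created; conceptually, each Rx queue setup amounts to a call
 * like the following (sketch; 'mb_pool' stands for the mbuf pool chosen
 * for the port's socket):
 *
 *	rte_eth_rx_queue_setup(pid, qid, port->nb_rx_desc[qid],
 *			port->socket_id, &port->rx_conf[qid], mb_pool);
 */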

/*
 * Helper function to set max_rx_pktlen and the JUMBO_FRAME offload
 * consistently; the MTU is aligned with them as well.
 *
 * port->dev_info should be set before calling this function.
 *
 * If 'max_rx_pktlen' is zero, it is set to the current device value,
 * "MTU + eth overhead". This is useful to update the flags without
 * changing the MTU value.
 *
 * return 0 on success, negative on error
 */
int
update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint64_t rx_offloads;
	uint16_t mtu, new_mtu;
	bool on;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
		return -1;
	}

	if (max_rx_pktlen == 0)
		max_rx_pktlen = mtu + eth_overhead;

	rx_offloads = port->dev_conf.rxmode.offloads;
	new_mtu = max_rx_pktlen - eth_overhead;

	if (new_mtu <= RTE_ETHER_MTU) {
		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
		on = false;
	} else {
		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
			fprintf(stderr,
				"Frame size (%u) is not supported by port %u\n",
				max_rx_pktlen, portid);
			return -1;
		}
		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
		on = true;
	}

	if (rx_offloads != port->dev_conf.rxmode.offloads) {
		uint16_t qid;

		port->dev_conf.rxmode.offloads = rx_offloads;

		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
			if (on)
				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
			else
				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
		}
	}

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}
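
/*
 * Typical use (hypothetical caller): request a 9000-byte maximum Rx
 * frame and let the helper align the MTU and the JUMBO_FRAME flag;
 * passing 0 instead re-derives the flags from the current MTU without
 * changing it:
 *
 *	if (update_jumbo_frame_offload(pid, 9000) != 0)
 *		fprintf(stderr, "port %u: cannot enable jumbo frames\n",
 *			pid);
 */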

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rx_conf[i].offloads &=
						~DEV_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
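
/*
 * Outside of testpmd's global configuration machinery, the RSS setup
 * above boils down to a small rte_eth_conf fragment; a minimal
 * standalone equivalent could look like this (sketch; the hash types
 * are illustrative):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf = {
 *			.rss_conf = {
 *				.rss_key = NULL, // PMD default key
 *				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *			},
 *		},
 *	};
 *	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &conf);
 */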

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}
	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
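
/*
 * Worked example of the VT mapping above: with num_tcs == ETH_4_TCS,
 * 32 pools are used and each of the 32 VLAN tags gets its own pool
 * (pool_map[i].pools == 1 << i); with num_tcs == ETH_8_TCS, only 16
 * pools exist, so just vlan_tags[0..15] are mapped. In both cases the
 * 8 user priorities are spread round-robin over the traffic classes
 * (dcb_tc[i] = i % num_tcs), e.g. priorities 0..7 map to TCs
 * 0,1,2,3,0,1,2,3 for four TCs.
 */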

int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		fprintf(stderr, "The multi-process feature doesn't support DCB.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* Retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* Re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* If VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
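
/*
 * In interactive testpmd this path is reached through the command line;
 * a typical session could look like the following (illustrative, so
 * treat the exact syntax as indicative rather than authoritative):
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 */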

static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
	/* ANSI escape sequences: ESC[2J clears the screen,
	 * ESC[1;1H moves the cursor to the top-left corner.
	 */
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate forced termination. */
		f_quit = 1;
		/* Exit with the expected status: re-raise the signal with
		 * the default handler installed.
		 */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}
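
/*
 * A typical invocation, for context (illustrative only; EAL options
 * come before the '--' separator, testpmd options after it):
 *
 *	./dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=4 --txq=4
 */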

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling\n");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring\n");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}