/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pools
 * used by the ports are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings
 * used by the ports are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings
 * used by the ports are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
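
/*
 * Illustrative sketch only (not part of testpmd itself): the mempool
 * allocation types above are typically chosen from a command-line string.
 * A hypothetical mapping helper could look like the following; the helper
 * name and the accepted strings are assumptions for the example.
 */
static inline uint8_t
mp_alloc_type_from_str(const char *name)
{
	if (strcmp(name, "anon") == 0)
		return MP_ALLOC_ANON;	/* populate from anonymous memory */
	if (strcmp(name, "xmem") == 0)
		return MP_ALLOC_XMEM;	/* externally allocated memory */
	return MP_ALLOC_NATIVE;		/* default: regular DPDK memory */
}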

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated directly. Set a flag to exit the stats-period loop once
 * SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
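
/*
 * Illustrative sketch only: tx_pkt_length is kept equal to the sum of the
 * configured segment lengths. A hypothetical helper recomputing it from
 * tx_pkt_seg_lengths[] would be:
 */
static inline uint16_t
txonly_total_pkt_len(void)
{
	uint16_t len = 0;
	uint8_t i;

	/* sum the data length of each TXONLY segment */
	for (i = 0; i < tx_pkt_nb_segs; i++)
		len += tx_pkt_seg_lengths[i];
	return len;
}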

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
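
/*
 * Illustrative sketch only: event_print_mask above is a bitmask indexed by
 * enum rte_eth_event_type, so deciding whether an event is displayed is a
 * single shift-and-test. Hypothetical helper for the example:
 */
static inline bool
event_is_displayed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}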

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles is disabled by default
 */
uint8_t record_core_cycles;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
498f9295aa2SXiaoyu Min */ 499f9295aa2SXiaoyu Min enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; 500f9295aa2SXiaoyu Min 501ed30d9b6SIntel /* Forward function declarations */ 502c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi); 50328caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 50428caa76aSZhiyong Yang struct rte_port *port); 505edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 506f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 50776ad4a2dSGaetan Rivet enum rte_eth_event_type type, 508d6af1a13SBernard Iremonger void *param, void *ret_param); 509cc1bf307SJeff Guo static void dev_event_callback(const char *device_name, 510fb73e096SJeff Guo enum rte_dev_event_type type, 511fb73e096SJeff Guo void *param); 512ce8d5614SIntel 513ce8d5614SIntel /* 514ce8d5614SIntel * Check if all the ports are started. 515ce8d5614SIntel * If yes, return positive value. If not, return zero. 516ce8d5614SIntel */ 517ce8d5614SIntel static int all_ports_started(void); 518ed30d9b6SIntel 51952f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 52035b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; 52152f38a20SJiayu Hu 522b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */ 523b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE]; 524b57b66a9SOri Kam 525af75078fSIntel /* 52698a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 527c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 528c9cafcc8SShahaf Shuler */ 529c9cafcc8SShahaf Shuler int 530c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 531c9cafcc8SShahaf Shuler { 532c9cafcc8SShahaf Shuler unsigned int i; 533c9cafcc8SShahaf Shuler 534c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 535c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 536c9cafcc8SShahaf Shuler return 0; 537c9cafcc8SShahaf Shuler } 538c9cafcc8SShahaf Shuler return 1; 539c9cafcc8SShahaf Shuler } 540c9cafcc8SShahaf Shuler 541c9cafcc8SShahaf Shuler /* 542af75078fSIntel * Setup default configuration. 

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
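
/*
 * Worked example for calc_mem_size() above (numbers are illustrative):
 * with 2 MB pages and an object size of 2176 bytes, one page holds
 * 2097152 / 2176 = 963 mbufs. For nb_mbufs = 100000 this gives
 * 100000 / 963 = 103 pages plus one leftover page, i.e. 104 pages
 * (208 MB) of mbuf memory, and 128 MB + 208 MB = 336 MB in total
 * (already a multiple of the page size, so RTE_ALIGN() is a no-op here).
 */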

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
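
/*
 * Usage note: dma_map_cb()/dma_unmap_cb() are per-chunk callbacks for
 * rte_mempool_mem_iter(), which invokes them once for each memory chunk
 * of a mempool. The map pass is done right after an MP_ALLOC_ANON pool
 * is populated (see mbuf_pool_create() below):
 *
 *	rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
 *
 * and the matching unmap pass with dma_unmap_cb is expected to run when
 * such a pool is torn down.
 */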

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}

/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
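
/*
 * Usage sketch (simplified; the real caller is init_config(), outside this
 * excerpt): with NUMA support, one mbuf pool is created per discovered
 * socket, roughly:
 *
 *	for (i = 0; i < num_sockets; i++)
 *		mempools[i] = mbuf_pool_create(mbuf_data_size,
 *					       nb_mbuf_per_pool,
 *					       socket_ids[i]);
 *
 * mbuf_pool_create() calls rte_exit() on failure, so callers may assume a
 * non-NULL return.
 */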

/*
 * Check if the given socket id is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check if the input rxq is valid.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues of any port.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}
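
/*
 * Usage note: check_nb_rxq() above and check_nb_txq() below follow the same
 * pattern; a hypothetical caller validating a new queue configuration would
 * do:
 *
 *	if (check_nb_rxq(nb_rxq) != 0 || check_nb_txq(nb_txq) != 0)
 *		return -1;	(reject the requested configuration)
 */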

/*
 * Check if the input txq is valid.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues of any port.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RXDs of every rx queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every rx queue.
 * *pid returns the port id which has the maximal value of
 * min_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}
122899e040d3SLijun Ou * If valid, return 0; otherwise return -1.
122999e040d3SLijun Ou */
123099e040d3SLijun Ou int
123199e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
123299e040d3SLijun Ou {
123399e040d3SLijun Ou uint16_t allowed_max_rxd;
123499e040d3SLijun Ou uint16_t allowed_min_rxd;
123599e040d3SLijun Ou portid_t pid = 0;
123699e040d3SLijun Ou
123799e040d3SLijun Ou allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
123899e040d3SLijun Ou if (rxd > allowed_max_rxd) {
123999e040d3SLijun Ou printf("Fail: input rxd (%u) can't be greater "
124099e040d3SLijun Ou "than max_rxds (%u) of port %u\n",
124199e040d3SLijun Ou rxd,
124299e040d3SLijun Ou allowed_max_rxd,
124399e040d3SLijun Ou pid);
124499e040d3SLijun Ou return -1;
124599e040d3SLijun Ou }
124699e040d3SLijun Ou
124799e040d3SLijun Ou allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
124899e040d3SLijun Ou if (rxd < allowed_min_rxd) {
124999e040d3SLijun Ou printf("Fail: input rxd (%u) can't be less "
125099e040d3SLijun Ou "than min_rxds (%u) of port %u\n",
125199e040d3SLijun Ou rxd,
125299e040d3SLijun Ou allowed_min_rxd,
125399e040d3SLijun Ou pid);
125499e040d3SLijun Ou return -1;
125599e040d3SLijun Ou }
125699e040d3SLijun Ou
125799e040d3SLijun Ou return 0;
125899e040d3SLijun Ou }
125999e040d3SLijun Ou
126099e040d3SLijun Ou /*
126199e040d3SLijun Ou * Get the allowed maximum number of TXDs of every Tx queue.
126299e040d3SLijun Ou * *pid returns the port id which has the minimal value of
126399e040d3SLijun Ou * max_txd among all ports.
126499e040d3SLijun Ou */
126599e040d3SLijun Ou static uint16_t
126699e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
126799e040d3SLijun Ou {
126899e040d3SLijun Ou uint16_t allowed_max_txd = UINT16_MAX;
126999e040d3SLijun Ou portid_t pi;
127099e040d3SLijun Ou struct rte_eth_dev_info dev_info;
127199e040d3SLijun Ou
127299e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
127399e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
127499e040d3SLijun Ou continue;
127599e040d3SLijun Ou
127699e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
127799e040d3SLijun Ou allowed_max_txd = dev_info.tx_desc_lim.nb_max;
127899e040d3SLijun Ou *pid = pi;
127999e040d3SLijun Ou }
128099e040d3SLijun Ou }
128199e040d3SLijun Ou return allowed_max_txd;
128299e040d3SLijun Ou }
128399e040d3SLijun Ou
128499e040d3SLijun Ou /*
128599e040d3SLijun Ou * Get the allowed minimal number of TXDs of every Tx queue.
128699e040d3SLijun Ou * *pid returns the port id which has the maximal value of
128799e040d3SLijun Ou * min_txd among all ports.
128899e040d3SLijun Ou */
128999e040d3SLijun Ou static uint16_t
129099e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
129199e040d3SLijun Ou {
129299e040d3SLijun Ou uint16_t allowed_min_txd = 0;
129399e040d3SLijun Ou portid_t pi;
129499e040d3SLijun Ou struct rte_eth_dev_info dev_info;
129599e040d3SLijun Ou
129699e040d3SLijun Ou RTE_ETH_FOREACH_DEV(pi) {
129799e040d3SLijun Ou if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
129899e040d3SLijun Ou continue;
129999e040d3SLijun Ou
130099e040d3SLijun Ou if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
130199e040d3SLijun Ou allowed_min_txd = dev_info.tx_desc_lim.nb_min;
130299e040d3SLijun Ou *pid = pi;
130399e040d3SLijun Ou }
130499e040d3SLijun Ou }
130599e040d3SLijun Ou
130699e040d3SLijun Ou return allowed_min_txd;
130799e040d3SLijun Ou }
130899e040d3SLijun Ou
130999e040d3SLijun Ou /*
131099e040d3SLijun Ou * Check if the input txd is valid or not.
131199e040d3SLijun Ou * It is valid if it does not exceed the maximum number of TXDs of any
131299e040d3SLijun Ou * Tx queue and is not below the minimal number of TXDs of any Tx queue.
131399e040d3SLijun Ou * If valid, return 0; otherwise return -1.
131499e040d3SLijun Ou */
131599e040d3SLijun Ou int
131699e040d3SLijun Ou check_nb_txd(queueid_t txd)
131799e040d3SLijun Ou {
131899e040d3SLijun Ou uint16_t allowed_max_txd;
131999e040d3SLijun Ou uint16_t allowed_min_txd;
132099e040d3SLijun Ou portid_t pid = 0;
132199e040d3SLijun Ou
132299e040d3SLijun Ou allowed_max_txd = get_allowed_max_nb_txd(&pid);
132399e040d3SLijun Ou if (txd > allowed_max_txd) {
132499e040d3SLijun Ou printf("Fail: input txd (%u) can't be greater "
132599e040d3SLijun Ou "than max_txds (%u) of port %u\n",
132699e040d3SLijun Ou txd,
132799e040d3SLijun Ou allowed_max_txd,
132899e040d3SLijun Ou pid);
132999e040d3SLijun Ou return -1;
133099e040d3SLijun Ou }
133199e040d3SLijun Ou
133299e040d3SLijun Ou allowed_min_txd = get_allowed_min_nb_txd(&pid);
133399e040d3SLijun Ou if (txd < allowed_min_txd) {
133499e040d3SLijun Ou printf("Fail: input txd (%u) can't be less "
133599e040d3SLijun Ou "than min_txds (%u) of port %u\n",
133699e040d3SLijun Ou txd,
133799e040d3SLijun Ou allowed_min_txd,
133899e040d3SLijun Ou pid);
133999e040d3SLijun Ou return -1;
134099e040d3SLijun Ou }
134199e040d3SLijun Ou return 0;
134299e040d3SLijun Ou }
134399e040d3SLijun Ou
134499e040d3SLijun Ou
134599e040d3SLijun Ou /*
13461c69df45SOri Kam * Get the allowed maximum number of hairpin queues.
13471c69df45SOri Kam * *pid returns the port id which has the minimal value of
13481c69df45SOri Kam * max_hairpin_queues among all ports.
13491c69df45SOri Kam */
13501c69df45SOri Kam queueid_t
13511c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13521c69df45SOri Kam {
13539e6b36c3SDavid Marchand queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13541c69df45SOri Kam portid_t pi;
13551c69df45SOri Kam struct rte_eth_hairpin_cap cap;
13561c69df45SOri Kam
13571c69df45SOri Kam RTE_ETH_FOREACH_DEV(pi) {
13581c69df45SOri Kam if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13591c69df45SOri Kam *pid = pi;
13601c69df45SOri Kam return 0;
13611c69df45SOri Kam }
13621c69df45SOri Kam if (cap.max_nb_queues < allowed_max_hairpinq) {
13631c69df45SOri Kam allowed_max_hairpinq = cap.max_nb_queues;
13641c69df45SOri Kam *pid = pi;
13651c69df45SOri Kam }
13661c69df45SOri Kam }
13671c69df45SOri Kam return allowed_max_hairpinq;
13681c69df45SOri Kam }
13691c69df45SOri Kam
13701c69df45SOri Kam /*
13711c69df45SOri Kam * Check if the input hairpinq is valid or not.
13721c69df45SOri Kam * If the input hairpinq is not greater than the maximum number
13731c69df45SOri Kam * of hairpin queues of any port, it is valid.
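 * The per-port limit is queried through rte_eth_dev_hairpin_capability_get();
 * a minimal sketch for a single port (error handling elided):
 *
 *	struct rte_eth_hairpin_cap cap;
 *	if (rte_eth_dev_hairpin_capability_get(pi, &cap) == 0)
 *		printf("port %u supports up to %u hairpin queues\n",
 *			pi, cap.max_nb_queues);
 *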
13741c69df45SOri Kam * if valid, return 0, else return -1 13751c69df45SOri Kam */ 13761c69df45SOri Kam int 13771c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq) 13781c69df45SOri Kam { 13791c69df45SOri Kam queueid_t allowed_max_hairpinq; 13801c69df45SOri Kam portid_t pid = 0; 13811c69df45SOri Kam 13821c69df45SOri Kam allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); 13831c69df45SOri Kam if (hairpinq > allowed_max_hairpinq) { 13841c69df45SOri Kam printf("Fail: input hairpin (%u) can't be greater " 13851c69df45SOri Kam "than max_hairpin_queues (%u) of port %u\n", 13861c69df45SOri Kam hairpinq, allowed_max_hairpinq, pid); 13871c69df45SOri Kam return -1; 13881c69df45SOri Kam } 13891c69df45SOri Kam return 0; 13901c69df45SOri Kam } 13911c69df45SOri Kam 1392af75078fSIntel static void 1393af75078fSIntel init_config(void) 1394af75078fSIntel { 1395ce8d5614SIntel portid_t pid; 1396af75078fSIntel struct rte_port *port; 1397af75078fSIntel struct rte_mempool *mbp; 1398af75078fSIntel unsigned int nb_mbuf_per_pool; 1399af75078fSIntel lcoreid_t lc_id; 14007acf894dSStephen Hurd uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; 1401b7091f1dSJiayu Hu struct rte_gro_param gro_param; 140252f38a20SJiayu Hu uint32_t gso_types; 140333f9630fSSunil Kumar Kori uint16_t data_size; 140433f9630fSSunil Kumar Kori bool warning = 0; 1405c73a9071SWei Dai int k; 14066f51deb9SIvan Ilchenko int ret; 1407af75078fSIntel 14087acf894dSStephen Hurd memset(port_per_socket,0,RTE_MAX_NUMA_NODES); 1409487f9a59SYulong Pei 1410af75078fSIntel /* Configuration of logical cores. */ 1411af75078fSIntel fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", 1412af75078fSIntel sizeof(struct fwd_lcore *) * nb_lcores, 1413fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1414af75078fSIntel if (fwd_lcores == NULL) { 1415ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " 1416ce8d5614SIntel "failed\n", nb_lcores); 1417af75078fSIntel } 1418af75078fSIntel for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1419af75078fSIntel fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", 1420af75078fSIntel sizeof(struct fwd_lcore), 1421fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 1422af75078fSIntel if (fwd_lcores[lc_id] == NULL) { 1423ce8d5614SIntel rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " 1424ce8d5614SIntel "failed\n"); 1425af75078fSIntel } 1426af75078fSIntel fwd_lcores[lc_id]->cpuid_idx = lc_id; 1427af75078fSIntel } 1428af75078fSIntel 14297d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1430ce8d5614SIntel port = &ports[pid]; 14318b9bd0efSMoti Haimovsky /* Apply default TxRx configuration for all ports */ 1432fd8c20aaSShahaf Shuler port->dev_conf.txmode = tx_mode; 1433384161e0SShahaf Shuler port->dev_conf.rxmode = rx_mode; 14346f51deb9SIvan Ilchenko 14356f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 14366f51deb9SIvan Ilchenko if (ret != 0) 14376f51deb9SIvan Ilchenko rte_exit(EXIT_FAILURE, 14386f51deb9SIvan Ilchenko "rte_eth_dev_info_get() failed\n"); 14397c45f6c0SFerruh Yigit 144007e5f7bdSShahaf Shuler if (!(port->dev_info.tx_offload_capa & 144107e5f7bdSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE)) 144207e5f7bdSShahaf Shuler port->dev_conf.txmode.offloads &= 144307e5f7bdSShahaf Shuler ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; 1444b6ea6408SIntel if (numa_support) { 1445b6ea6408SIntel if (port_numa[pid] != NUMA_NO_CONFIG) 1446b6ea6408SIntel port_per_socket[port_numa[pid]]++; 1447b6ea6408SIntel else { 1448b6ea6408SIntel uint32_t socket_id = rte_eth_dev_socket_id(pid); 144920a0286fSLiu 
Xiaofeng 145029841336SPhil Yang /* 145129841336SPhil Yang * if socket_id is invalid, 145229841336SPhil Yang * set to the first available socket. 145329841336SPhil Yang */ 145420a0286fSLiu Xiaofeng if (check_socket_id(socket_id) < 0) 145529841336SPhil Yang socket_id = socket_ids[0]; 1456b6ea6408SIntel port_per_socket[socket_id]++; 1457b6ea6408SIntel } 1458b6ea6408SIntel } 1459b6ea6408SIntel 1460c73a9071SWei Dai /* Apply Rx offloads configuration */ 1461c73a9071SWei Dai for (k = 0; k < port->dev_info.max_rx_queues; k++) 1462c73a9071SWei Dai port->rx_conf[k].offloads = 1463c73a9071SWei Dai port->dev_conf.rxmode.offloads; 1464c73a9071SWei Dai /* Apply Tx offloads configuration */ 1465c73a9071SWei Dai for (k = 0; k < port->dev_info.max_tx_queues; k++) 1466c73a9071SWei Dai port->tx_conf[k].offloads = 1467c73a9071SWei Dai port->dev_conf.txmode.offloads; 1468c73a9071SWei Dai 1469ce8d5614SIntel /* set flag to initialize port/queue */ 1470ce8d5614SIntel port->need_reconfig = 1; 1471ce8d5614SIntel port->need_reconfig_queues = 1; 1472c18feafaSDekel Peled port->tx_metadata = 0; 147333f9630fSSunil Kumar Kori 147433f9630fSSunil Kumar Kori /* Check for maximum number of segments per MTU. Accordingly 147533f9630fSSunil Kumar Kori * update the mbuf data size. 147633f9630fSSunil Kumar Kori */ 1477163fbaafSFerruh Yigit if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && 1478163fbaafSFerruh Yigit port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { 147933f9630fSSunil Kumar Kori data_size = rx_mode.max_rx_pkt_len / 148033f9630fSSunil Kumar Kori port->dev_info.rx_desc_lim.nb_mtu_seg_max; 148133f9630fSSunil Kumar Kori 148233f9630fSSunil Kumar Kori if ((data_size + RTE_PKTMBUF_HEADROOM) > 148333f9630fSSunil Kumar Kori mbuf_data_size) { 148433f9630fSSunil Kumar Kori mbuf_data_size = data_size + 148533f9630fSSunil Kumar Kori RTE_PKTMBUF_HEADROOM; 148633f9630fSSunil Kumar Kori warning = 1; 1487ce8d5614SIntel } 148833f9630fSSunil Kumar Kori } 148933f9630fSSunil Kumar Kori } 149033f9630fSSunil Kumar Kori 149133f9630fSSunil Kumar Kori if (warning) 149233f9630fSSunil Kumar Kori TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n", 149333f9630fSSunil Kumar Kori mbuf_data_size); 1494ce8d5614SIntel 14953ab64341SOlivier Matz /* 14963ab64341SOlivier Matz * Create pools of mbuf. 14973ab64341SOlivier Matz * If NUMA support is disabled, create a single pool of mbuf in 14983ab64341SOlivier Matz * socket 0 memory by default. 14993ab64341SOlivier Matz * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. 15003ab64341SOlivier Matz * 15013ab64341SOlivier Matz * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and 15023ab64341SOlivier Matz * nb_txd can be configured at run time. 
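 * Worked example, assuming the usual testpmd defaults of
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512
 * and mb_mempool_cache = 250: with 4 forwarding lcores,
 *
 *	nb_mbuf_per_pool = 2048 + (4 * 250) + 2048 + 512 = 5608
 *
 * which is then multiplied by RTE_MAX_ETHPORTS, unless --total-num-mbufs
 * overrides the computation entirely.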
15033ab64341SOlivier Matz */ 15043ab64341SOlivier Matz if (param_total_num_mbufs) 15053ab64341SOlivier Matz nb_mbuf_per_pool = param_total_num_mbufs; 15063ab64341SOlivier Matz else { 15073ab64341SOlivier Matz nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + 15083ab64341SOlivier Matz (nb_lcores * mb_mempool_cache) + 15093ab64341SOlivier Matz RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; 15103ab64341SOlivier Matz nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; 15113ab64341SOlivier Matz } 15123ab64341SOlivier Matz 1513b6ea6408SIntel if (numa_support) { 1514b6ea6408SIntel uint8_t i; 1515ce8d5614SIntel 1516c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) 1517401b744dSShahaf Shuler mempools[i] = mbuf_pool_create(mbuf_data_size, 1518401b744dSShahaf Shuler nb_mbuf_per_pool, 1519c9cafcc8SShahaf Shuler socket_ids[i]); 15203ab64341SOlivier Matz } else { 15213ab64341SOlivier Matz if (socket_num == UMA_NO_CONFIG) 1522401b744dSShahaf Shuler mempools[0] = mbuf_pool_create(mbuf_data_size, 1523401b744dSShahaf Shuler nb_mbuf_per_pool, 0); 15243ab64341SOlivier Matz else 1525401b744dSShahaf Shuler mempools[socket_num] = mbuf_pool_create 1526401b744dSShahaf Shuler (mbuf_data_size, 1527401b744dSShahaf Shuler nb_mbuf_per_pool, 15283ab64341SOlivier Matz socket_num); 15293ab64341SOlivier Matz } 1530b6ea6408SIntel 1531b6ea6408SIntel init_port_config(); 15325886ae07SAdrien Mazarguil 153352f38a20SJiayu Hu gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1534aaacd052SJiayu Hu DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; 15355886ae07SAdrien Mazarguil /* 15365886ae07SAdrien Mazarguil * Records which Mbuf pool to use by each logical core, if needed. 15375886ae07SAdrien Mazarguil */ 15385886ae07SAdrien Mazarguil for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 15398fd8bebcSAdrien Mazarguil mbp = mbuf_pool_find( 15408fd8bebcSAdrien Mazarguil rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); 15418fd8bebcSAdrien Mazarguil 15425886ae07SAdrien Mazarguil if (mbp == NULL) 15435886ae07SAdrien Mazarguil mbp = mbuf_pool_find(0); 15445886ae07SAdrien Mazarguil fwd_lcores[lc_id]->mbp = mbp; 154552f38a20SJiayu Hu /* initialize GSO context */ 154652f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; 154752f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; 154852f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; 154935b2d13fSOlivier Matz fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - 155035b2d13fSOlivier Matz RTE_ETHER_CRC_LEN; 155152f38a20SJiayu Hu fwd_lcores[lc_id]->gso_ctx.flag = 0; 15525886ae07SAdrien Mazarguil } 15535886ae07SAdrien Mazarguil 1554ce8d5614SIntel /* Configuration of packet forwarding streams. 
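 * A stream is one (Rx port/queue -> Tx port/queue) pair; init_fwd_streams()
 * below allocates nb_ports * RTE_MAX(nb_rxq, nb_txq) of them.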
*/ 1555ce8d5614SIntel if (init_fwd_streams() < 0) 1556ce8d5614SIntel rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); 15570c0db76fSBernard Iremonger 15580c0db76fSBernard Iremonger fwd_config_setup(); 1559b7091f1dSJiayu Hu 1560b7091f1dSJiayu Hu /* create a gro context for each lcore */ 1561b7091f1dSJiayu Hu gro_param.gro_types = RTE_GRO_TCP_IPV4; 1562b7091f1dSJiayu Hu gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; 1563b7091f1dSJiayu Hu gro_param.max_item_per_flow = MAX_PKT_BURST; 1564b7091f1dSJiayu Hu for (lc_id = 0; lc_id < nb_lcores; lc_id++) { 1565b7091f1dSJiayu Hu gro_param.socket_id = rte_lcore_to_socket_id( 1566b7091f1dSJiayu Hu fwd_lcores_cpuids[lc_id]); 1567b7091f1dSJiayu Hu fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); 1568b7091f1dSJiayu Hu if (fwd_lcores[lc_id]->gro_ctx == NULL) { 1569b7091f1dSJiayu Hu rte_exit(EXIT_FAILURE, 1570b7091f1dSJiayu Hu "rte_gro_ctx_create() failed\n"); 1571b7091f1dSJiayu Hu } 1572b7091f1dSJiayu Hu } 1573ce8d5614SIntel } 1574ce8d5614SIntel 15752950a769SDeclan Doherty 15762950a769SDeclan Doherty void 1577a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id) 15782950a769SDeclan Doherty { 15792950a769SDeclan Doherty struct rte_port *port; 15806f51deb9SIvan Ilchenko int ret; 15812950a769SDeclan Doherty 15822950a769SDeclan Doherty /* Reconfiguration of Ethernet ports. */ 15832950a769SDeclan Doherty port = &ports[new_port_id]; 15846f51deb9SIvan Ilchenko 15856f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info); 15866f51deb9SIvan Ilchenko if (ret != 0) 15876f51deb9SIvan Ilchenko return; 15882950a769SDeclan Doherty 15892950a769SDeclan Doherty /* set flag to initialize port/queue */ 15902950a769SDeclan Doherty port->need_reconfig = 1; 15912950a769SDeclan Doherty port->need_reconfig_queues = 1; 1592a21d5a4bSDeclan Doherty port->socket_id = socket_id; 15932950a769SDeclan Doherty 15942950a769SDeclan Doherty init_port_config(); 15952950a769SDeclan Doherty } 15962950a769SDeclan Doherty 15972950a769SDeclan Doherty 1598ce8d5614SIntel int 1599ce8d5614SIntel init_fwd_streams(void) 1600ce8d5614SIntel { 1601ce8d5614SIntel portid_t pid; 1602ce8d5614SIntel struct rte_port *port; 1603ce8d5614SIntel streamid_t sm_id, nb_fwd_streams_new; 16045a8fb55cSReshma Pattan queueid_t q; 1605ce8d5614SIntel 1606ce8d5614SIntel /* set socket id according to numa or not */ 16077d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 1608ce8d5614SIntel port = &ports[pid]; 1609ce8d5614SIntel if (nb_rxq > port->dev_info.max_rx_queues) { 1610ce8d5614SIntel printf("Fail: nb_rxq(%d) is greater than " 1611ce8d5614SIntel "max_rx_queues(%d)\n", nb_rxq, 1612ce8d5614SIntel port->dev_info.max_rx_queues); 1613ce8d5614SIntel return -1; 1614ce8d5614SIntel } 1615ce8d5614SIntel if (nb_txq > port->dev_info.max_tx_queues) { 1616ce8d5614SIntel printf("Fail: nb_txq(%d) is greater than " 1617ce8d5614SIntel "max_tx_queues(%d)\n", nb_txq, 1618ce8d5614SIntel port->dev_info.max_tx_queues); 1619ce8d5614SIntel return -1; 1620ce8d5614SIntel } 162120a0286fSLiu Xiaofeng if (numa_support) { 162220a0286fSLiu Xiaofeng if (port_numa[pid] != NUMA_NO_CONFIG) 162320a0286fSLiu Xiaofeng port->socket_id = port_numa[pid]; 162420a0286fSLiu Xiaofeng else { 1625b6ea6408SIntel port->socket_id = rte_eth_dev_socket_id(pid); 162620a0286fSLiu Xiaofeng 162729841336SPhil Yang /* 162829841336SPhil Yang * if socket_id is invalid, 162929841336SPhil Yang * set to the first available socket. 
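 * (rte_eth_dev_socket_id() can report a node unknown to this process,
 * e.g. -1 when the device's NUMA node cannot be determined, hence the
 * fallback to the first socket discovered at startup.)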
163029841336SPhil Yang */ 163120a0286fSLiu Xiaofeng if (check_socket_id(port->socket_id) < 0) 163229841336SPhil Yang port->socket_id = socket_ids[0]; 163320a0286fSLiu Xiaofeng } 163420a0286fSLiu Xiaofeng } 1635b6ea6408SIntel else { 1636b6ea6408SIntel if (socket_num == UMA_NO_CONFIG) 1637af75078fSIntel port->socket_id = 0; 1638b6ea6408SIntel else 1639b6ea6408SIntel port->socket_id = socket_num; 1640b6ea6408SIntel } 1641af75078fSIntel } 1642af75078fSIntel 16435a8fb55cSReshma Pattan q = RTE_MAX(nb_rxq, nb_txq); 16445a8fb55cSReshma Pattan if (q == 0) { 16455a8fb55cSReshma Pattan printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); 16465a8fb55cSReshma Pattan return -1; 16475a8fb55cSReshma Pattan } 16485a8fb55cSReshma Pattan nb_fwd_streams_new = (streamid_t)(nb_ports * q); 1649ce8d5614SIntel if (nb_fwd_streams_new == nb_fwd_streams) 1650ce8d5614SIntel return 0; 1651ce8d5614SIntel /* clear the old */ 1652ce8d5614SIntel if (fwd_streams != NULL) { 1653ce8d5614SIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 1654ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 1655ce8d5614SIntel continue; 1656ce8d5614SIntel rte_free(fwd_streams[sm_id]); 1657ce8d5614SIntel fwd_streams[sm_id] = NULL; 1658af75078fSIntel } 1659ce8d5614SIntel rte_free(fwd_streams); 1660ce8d5614SIntel fwd_streams = NULL; 1661ce8d5614SIntel } 1662ce8d5614SIntel 1663ce8d5614SIntel /* init new */ 1664ce8d5614SIntel nb_fwd_streams = nb_fwd_streams_new; 16651f84c469SMatan Azrad if (nb_fwd_streams) { 1666ce8d5614SIntel fwd_streams = rte_zmalloc("testpmd: fwd_streams", 16671f84c469SMatan Azrad sizeof(struct fwd_stream *) * nb_fwd_streams, 16681f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1669ce8d5614SIntel if (fwd_streams == NULL) 16701f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" 16711f84c469SMatan Azrad " (struct fwd_stream *)) failed\n", 16721f84c469SMatan Azrad nb_fwd_streams); 1673ce8d5614SIntel 1674af75078fSIntel for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { 16751f84c469SMatan Azrad fwd_streams[sm_id] = rte_zmalloc("testpmd:" 16761f84c469SMatan Azrad " struct fwd_stream", sizeof(struct fwd_stream), 16771f84c469SMatan Azrad RTE_CACHE_LINE_SIZE); 1678ce8d5614SIntel if (fwd_streams[sm_id] == NULL) 16791f84c469SMatan Azrad rte_exit(EXIT_FAILURE, "rte_zmalloc" 16801f84c469SMatan Azrad "(struct fwd_stream) failed\n"); 16811f84c469SMatan Azrad } 1682af75078fSIntel } 1683ce8d5614SIntel 1684ce8d5614SIntel return 0; 1685af75078fSIntel } 1686af75078fSIntel 1687af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1688af75078fSIntel static void 1689af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) 1690af75078fSIntel { 16917569b8c1SHonnappa Nagarahalli uint64_t total_burst, sburst; 169285de481aSHonnappa Nagarahalli uint64_t nb_burst; 16937569b8c1SHonnappa Nagarahalli uint64_t burst_stats[4]; 16947569b8c1SHonnappa Nagarahalli uint16_t pktnb_stats[4]; 1695af75078fSIntel uint16_t nb_pkt; 16967569b8c1SHonnappa Nagarahalli int burst_percent[4], sburstp; 16977569b8c1SHonnappa Nagarahalli int i; 1698af75078fSIntel 1699af75078fSIntel /* 1700af75078fSIntel * First compute the total number of packet bursts and the 1701af75078fSIntel * two highest numbers of bursts of the same number of packets. 
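 * Worked example: with pkt_burst_spread[32] == 900 and
 * pkt_burst_spread[1] == 100, total_burst is 1000, so the summary line
 * attributes 90% of bursts to size 32 and 10% to size 1.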
1702af75078fSIntel */ 17037569b8c1SHonnappa Nagarahalli memset(&burst_stats, 0x0, sizeof(burst_stats)); 17047569b8c1SHonnappa Nagarahalli memset(&pktnb_stats, 0x0, sizeof(pktnb_stats)); 17057569b8c1SHonnappa Nagarahalli 17067569b8c1SHonnappa Nagarahalli /* Show stats for 0 burst size always */ 17077569b8c1SHonnappa Nagarahalli total_burst = pbs->pkt_burst_spread[0]; 17087569b8c1SHonnappa Nagarahalli burst_stats[0] = pbs->pkt_burst_spread[0]; 17097569b8c1SHonnappa Nagarahalli pktnb_stats[0] = 0; 17107569b8c1SHonnappa Nagarahalli 17117569b8c1SHonnappa Nagarahalli /* Find the next 2 burst sizes with highest occurrences. */ 17127569b8c1SHonnappa Nagarahalli for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1713af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 17147569b8c1SHonnappa Nagarahalli 1715af75078fSIntel if (nb_burst == 0) 1716af75078fSIntel continue; 17177569b8c1SHonnappa Nagarahalli 1718af75078fSIntel total_burst += nb_burst; 17197569b8c1SHonnappa Nagarahalli 17207569b8c1SHonnappa Nagarahalli if (nb_burst > burst_stats[1]) { 17217569b8c1SHonnappa Nagarahalli burst_stats[2] = burst_stats[1]; 17227569b8c1SHonnappa Nagarahalli pktnb_stats[2] = pktnb_stats[1]; 1723fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1724fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 17257569b8c1SHonnappa Nagarahalli } else if (nb_burst > burst_stats[2]) { 17267569b8c1SHonnappa Nagarahalli burst_stats[2] = nb_burst; 17277569b8c1SHonnappa Nagarahalli pktnb_stats[2] = nb_pkt; 1728af75078fSIntel } 1729af75078fSIntel } 1730af75078fSIntel if (total_burst == 0) 1731af75078fSIntel return; 17327569b8c1SHonnappa Nagarahalli 17337569b8c1SHonnappa Nagarahalli printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst); 17347569b8c1SHonnappa Nagarahalli for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) { 17357569b8c1SHonnappa Nagarahalli if (i == 3) { 17367569b8c1SHonnappa Nagarahalli printf("%d%% of other]\n", 100 - sburstp); 1737af75078fSIntel return; 1738af75078fSIntel } 17397569b8c1SHonnappa Nagarahalli 17407569b8c1SHonnappa Nagarahalli sburst += burst_stats[i]; 17417569b8c1SHonnappa Nagarahalli if (sburst == total_burst) { 17427569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts]\n", 17437569b8c1SHonnappa Nagarahalli 100 - sburstp, (int) pktnb_stats[i]); 1744af75078fSIntel return; 1745af75078fSIntel } 17467569b8c1SHonnappa Nagarahalli 17477569b8c1SHonnappa Nagarahalli burst_percent[i] = 17487569b8c1SHonnappa Nagarahalli (double)burst_stats[i] / total_burst * 100; 17497569b8c1SHonnappa Nagarahalli printf("%d%% of %d pkts + ", 17507569b8c1SHonnappa Nagarahalli burst_percent[i], (int) pktnb_stats[i]); 17517569b8c1SHonnappa Nagarahalli sburstp += burst_percent[i]; 1752af75078fSIntel } 1753af75078fSIntel } 1754af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 1755af75078fSIntel 1756af75078fSIntel static void 1757af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1758af75078fSIntel { 1759af75078fSIntel struct fwd_stream *fs; 1760af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1761af75078fSIntel 1762af75078fSIntel fs = fwd_streams[stream_id]; 1763af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1764af75078fSIntel (fs->fwd_dropped == 0)) 1765af75078fSIntel return; 1766af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1767af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1768af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1769af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1770c185d42cSDavid Marchand 
printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1771c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1772af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1773af75078fSIntel 1774af75078fSIntel /* if checksum mode */ 1775af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1776c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1777c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1778c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 177958d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 178058d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 178194d65546SDavid Marchand } else { 178294d65546SDavid Marchand printf("\n"); 1783af75078fSIntel } 1784af75078fSIntel 1785af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1786af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1787af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1788af75078fSIntel #endif 1789af75078fSIntel } 1790af75078fSIntel 179153324971SDavid Marchand void 179253324971SDavid Marchand fwd_stats_display(void) 179353324971SDavid Marchand { 179453324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 179553324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 179653324971SDavid Marchand struct { 179753324971SDavid Marchand struct fwd_stream *rx_stream; 179853324971SDavid Marchand struct fwd_stream *tx_stream; 179953324971SDavid Marchand uint64_t tx_dropped; 180053324971SDavid Marchand uint64_t rx_bad_ip_csum; 180153324971SDavid Marchand uint64_t rx_bad_l4_csum; 180253324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 180353324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 180453324971SDavid Marchand uint64_t total_rx_dropped = 0; 180553324971SDavid Marchand uint64_t total_tx_dropped = 0; 180653324971SDavid Marchand uint64_t total_rx_nombuf = 0; 180753324971SDavid Marchand struct rte_eth_stats stats; 180853324971SDavid Marchand uint64_t fwd_cycles = 0; 180953324971SDavid Marchand uint64_t total_recv = 0; 181053324971SDavid Marchand uint64_t total_xmit = 0; 181153324971SDavid Marchand struct rte_port *port; 181253324971SDavid Marchand streamid_t sm_id; 181353324971SDavid Marchand portid_t pt_id; 181453324971SDavid Marchand int i; 181553324971SDavid Marchand 181653324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 181753324971SDavid Marchand 181853324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 181953324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 182053324971SDavid Marchand 182153324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 182253324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 182353324971SDavid Marchand fwd_stream_stats_display(sm_id); 182453324971SDavid Marchand } else { 182553324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 182653324971SDavid Marchand ports_stats[fs->rx_port].rx_stream = fs; 182753324971SDavid Marchand } 182853324971SDavid Marchand 182953324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 183053324971SDavid Marchand 183153324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 183253324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 183353324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 183453324971SDavid Marchand fs->rx_bad_outer_l4_csum; 183553324971SDavid Marchand 1836*bc700b67SDharmik Thakkar if (record_core_cycles) 
183753324971SDavid Marchand fwd_cycles += fs->core_cycles; 183853324971SDavid Marchand } 183953324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 184053324971SDavid Marchand uint8_t j; 184153324971SDavid Marchand 184253324971SDavid Marchand pt_id = fwd_ports_ids[i]; 184353324971SDavid Marchand port = &ports[pt_id]; 184453324971SDavid Marchand 184553324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 184653324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 184753324971SDavid Marchand stats.opackets -= port->stats.opackets; 184853324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 184953324971SDavid Marchand stats.obytes -= port->stats.obytes; 185053324971SDavid Marchand stats.imissed -= port->stats.imissed; 185153324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 185253324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 185353324971SDavid Marchand 185453324971SDavid Marchand total_recv += stats.ipackets; 185553324971SDavid Marchand total_xmit += stats.opackets; 185653324971SDavid Marchand total_rx_dropped += stats.imissed; 185753324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 185853324971SDavid Marchand total_tx_dropped += stats.oerrors; 185953324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 186053324971SDavid Marchand 186153324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 186253324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 186353324971SDavid Marchand 186453324971SDavid Marchand if (!port->rx_queue_stats_mapping_enabled && 186553324971SDavid Marchand !port->tx_queue_stats_mapping_enabled) { 186653324971SDavid Marchand printf(" RX-packets: %-14"PRIu64 186753324971SDavid Marchand " RX-dropped: %-14"PRIu64 186853324971SDavid Marchand "RX-total: %-"PRIu64"\n", 186953324971SDavid Marchand stats.ipackets, stats.imissed, 187053324971SDavid Marchand stats.ipackets + stats.imissed); 187153324971SDavid Marchand 187253324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 187353324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 187453324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 187553324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 187653324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 187753324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 187853324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 187953324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 188053324971SDavid Marchand printf(" RX-error: %-"PRIu64"\n", 188153324971SDavid Marchand stats.ierrors); 188253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", 188353324971SDavid Marchand stats.rx_nombuf); 188453324971SDavid Marchand } 188553324971SDavid Marchand 188653324971SDavid Marchand printf(" TX-packets: %-14"PRIu64 188753324971SDavid Marchand " TX-dropped: %-14"PRIu64 188853324971SDavid Marchand "TX-total: %-"PRIu64"\n", 188953324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 189053324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 189153324971SDavid Marchand } else { 189253324971SDavid Marchand printf(" RX-packets: %14"PRIu64 189353324971SDavid Marchand " RX-dropped:%14"PRIu64 189453324971SDavid Marchand " RX-total:%14"PRIu64"\n", 189553324971SDavid Marchand stats.ipackets, stats.imissed, 189653324971SDavid Marchand stats.ipackets + stats.imissed); 189753324971SDavid Marchand 189853324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 189953324971SDavid Marchand printf(" 
Bad-ipcsum:%14"PRIu64 190053324971SDavid Marchand " Bad-l4csum:%14"PRIu64 190153324971SDavid Marchand " Bad-outer-l4csum: %-14"PRIu64"\n", 190253324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 190353324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 190453324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 190553324971SDavid Marchand if ((stats.ierrors + stats.rx_nombuf) > 0) { 190653324971SDavid Marchand printf(" RX-error:%"PRIu64"\n", stats.ierrors); 190753324971SDavid Marchand printf(" RX-nombufs: %14"PRIu64"\n", 190853324971SDavid Marchand stats.rx_nombuf); 190953324971SDavid Marchand } 191053324971SDavid Marchand 191153324971SDavid Marchand printf(" TX-packets: %14"PRIu64 191253324971SDavid Marchand " TX-dropped:%14"PRIu64 191353324971SDavid Marchand " TX-total:%14"PRIu64"\n", 191453324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 191553324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 191653324971SDavid Marchand } 191753324971SDavid Marchand 191853324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 191953324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 192053324971SDavid Marchand pkt_burst_stats_display("RX", 192153324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 192253324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 192353324971SDavid Marchand pkt_burst_stats_display("TX", 192453324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 192553324971SDavid Marchand #endif 192653324971SDavid Marchand 192753324971SDavid Marchand if (port->rx_queue_stats_mapping_enabled) { 192853324971SDavid Marchand printf("\n"); 192953324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 193053324971SDavid Marchand printf(" Stats reg %2d RX-packets:%14"PRIu64 193153324971SDavid Marchand " RX-errors:%14"PRIu64 193253324971SDavid Marchand " RX-bytes:%14"PRIu64"\n", 193353324971SDavid Marchand j, stats.q_ipackets[j], 193453324971SDavid Marchand stats.q_errors[j], stats.q_ibytes[j]); 193553324971SDavid Marchand } 193653324971SDavid Marchand printf("\n"); 193753324971SDavid Marchand } 193853324971SDavid Marchand if (port->tx_queue_stats_mapping_enabled) { 193953324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 194053324971SDavid Marchand printf(" Stats reg %2d TX-packets:%14"PRIu64 194153324971SDavid Marchand " TX-bytes:%14" 194253324971SDavid Marchand PRIu64"\n", 194353324971SDavid Marchand j, stats.q_opackets[j], 194453324971SDavid Marchand stats.q_obytes[j]); 194553324971SDavid Marchand } 194653324971SDavid Marchand } 194753324971SDavid Marchand 194853324971SDavid Marchand printf(" %s--------------------------------%s\n", 194953324971SDavid Marchand fwd_stats_border, fwd_stats_border); 195053324971SDavid Marchand } 195153324971SDavid Marchand 195253324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 195353324971SDavid Marchand "%s\n", 195453324971SDavid Marchand acc_stats_border, acc_stats_border); 195553324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 195653324971SDavid Marchand "%-"PRIu64"\n" 195753324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 195853324971SDavid Marchand "%-"PRIu64"\n", 195953324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 196053324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 196153324971SDavid Marchand if (total_rx_nombuf > 0) 
196253324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 196353324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 196453324971SDavid Marchand "%s\n", 196553324971SDavid Marchand acc_stats_border, acc_stats_border); 1966*bc700b67SDharmik Thakkar if (record_core_cycles) { 19674c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6 19683a164e00SPhil Yang if (total_recv > 0 || total_xmit > 0) { 19693a164e00SPhil Yang uint64_t total_pkts = 0; 19703a164e00SPhil Yang if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 || 19713a164e00SPhil Yang strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0) 19723a164e00SPhil Yang total_pkts = total_xmit; 19733a164e00SPhil Yang else 19743a164e00SPhil Yang total_pkts = total_recv; 19753a164e00SPhil Yang 19761920832aSDharmik Thakkar printf("\n CPU cycles/packet=%.2F (total cycles=" 19773a164e00SPhil Yang "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64 19784c0497b1SDharmik Thakkar " MHz Clock\n", 19793a164e00SPhil Yang (double) fwd_cycles / total_pkts, 19803a164e00SPhil Yang fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts, 19814c0497b1SDharmik Thakkar (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); 19823a164e00SPhil Yang } 1983*bc700b67SDharmik Thakkar } 198453324971SDavid Marchand } 198553324971SDavid Marchand 198653324971SDavid Marchand void 198753324971SDavid Marchand fwd_stats_reset(void) 198853324971SDavid Marchand { 198953324971SDavid Marchand streamid_t sm_id; 199053324971SDavid Marchand portid_t pt_id; 199153324971SDavid Marchand int i; 199253324971SDavid Marchand 199353324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 199453324971SDavid Marchand pt_id = fwd_ports_ids[i]; 199553324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 199653324971SDavid Marchand } 199753324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 199853324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 199953324971SDavid Marchand 200053324971SDavid Marchand fs->rx_packets = 0; 200153324971SDavid Marchand fs->tx_packets = 0; 200253324971SDavid Marchand fs->fwd_dropped = 0; 200353324971SDavid Marchand fs->rx_bad_ip_csum = 0; 200453324971SDavid Marchand fs->rx_bad_l4_csum = 0; 200553324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 200653324971SDavid Marchand 200753324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 200853324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 200953324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 201053324971SDavid Marchand #endif 201153324971SDavid Marchand fs->core_cycles = 0; 201253324971SDavid Marchand } 201353324971SDavid Marchand } 201453324971SDavid Marchand 2015af75078fSIntel static void 20167741e4cfSIntel flush_fwd_rx_queues(void) 2017af75078fSIntel { 2018af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2019af75078fSIntel portid_t rxp; 20207741e4cfSIntel portid_t port_id; 2021af75078fSIntel queueid_t rxq; 2022af75078fSIntel uint16_t nb_rx; 2023af75078fSIntel uint16_t i; 2024af75078fSIntel uint8_t j; 2025f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 2026594302c7SJames Poole uint64_t timer_period; 2027f487715fSReshma Pattan 2028f487715fSReshma Pattan /* convert to number of cycles */ 2029594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 2030af75078fSIntel 2031af75078fSIntel for (j = 0; j < 2; j++) { 20327741e4cfSIntel for (rxp = 0; rxp < 
cur_fwd_config.nb_fwd_ports; rxp++) {
2033af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) {
20347741e4cfSIntel port_id = fwd_ports_ids[rxp];
2035f487715fSReshma Pattan /**
2036f487715fSReshma Pattan * testpmd can get stuck in the below do-while loop
2037f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero
2038f487715fSReshma Pattan * packets, so a timer is added to exit this loop
2039f487715fSReshma Pattan * after a 1 second timeout.
2040f487715fSReshma Pattan */
2041f487715fSReshma Pattan prev_tsc = rte_rdtsc();
2042af75078fSIntel do {
20437741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq,
2044013af9b6SIntel pkts_burst, MAX_PKT_BURST);
2045af75078fSIntel for (i = 0; i < nb_rx; i++)
2046af75078fSIntel rte_pktmbuf_free(pkts_burst[i]);
2047f487715fSReshma Pattan
2048f487715fSReshma Pattan cur_tsc = rte_rdtsc();
2049f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc;
2050f487715fSReshma Pattan timer_tsc += diff_tsc;
2051f487715fSReshma Pattan } while ((nb_rx > 0) &&
2052f487715fSReshma Pattan (timer_tsc < timer_period));
2053f487715fSReshma Pattan timer_tsc = 0;
2054af75078fSIntel }
2055af75078fSIntel }
2056af75078fSIntel rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2057af75078fSIntel }
2058af75078fSIntel }
2059af75078fSIntel
2060af75078fSIntel static void
2061af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2062af75078fSIntel {
2063af75078fSIntel struct fwd_stream **fsm;
2064af75078fSIntel streamid_t nb_fs;
2065af75078fSIntel streamid_t sm_id;
20667e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
20677e4441c8SRemy Horton uint64_t tics_per_1sec;
20687e4441c8SRemy Horton uint64_t tics_datum;
20697e4441c8SRemy Horton uint64_t tics_current;
20704918a357SXiaoyun Li uint16_t i, cnt_ports;
2071af75078fSIntel
20724918a357SXiaoyun Li cnt_ports = nb_ports;
20737e4441c8SRemy Horton tics_datum = rte_rdtsc();
20747e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz();
20757e4441c8SRemy Horton #endif
2076af75078fSIntel fsm = &fwd_streams[fc->stream_idx];
2077af75078fSIntel nb_fs = fc->stream_nb;
2078af75078fSIntel do {
2079af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++)
2080af75078fSIntel (*pkt_fwd)(fsm[sm_id]);
20817e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
2082e25e6c70SRemy Horton if (bitrate_enabled != 0 &&
2083e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) {
20847e4441c8SRemy Horton tics_current = rte_rdtsc();
20857e4441c8SRemy Horton if (tics_current - tics_datum >= tics_per_1sec) {
20867e4441c8SRemy Horton /* Periodic bitrate calculation */
20874918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++)
2088e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data,
20894918a357SXiaoyun Li ports_ids[i]);
20907e4441c8SRemy Horton tics_datum = tics_current;
20917e4441c8SRemy Horton }
2092e25e6c70SRemy Horton }
20937e4441c8SRemy Horton #endif
209462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
209565eb1e54SPablo de Lara if (latencystats_enabled != 0 &&
209665eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id())
209762d3216dSReshma Pattan rte_latencystats_update();
209862d3216dSReshma Pattan #endif
209962d3216dSReshma Pattan
2100af75078fSIntel } while (!
fc->stopped); 2101af75078fSIntel } 2102af75078fSIntel 2103af75078fSIntel static int 2104af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 2105af75078fSIntel { 2106af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 2107af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 2108af75078fSIntel return 0; 2109af75078fSIntel } 2110af75078fSIntel 2111af75078fSIntel /* 2112af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 2113af75078fSIntel * Used to start communication flows in network loopback test configurations. 2114af75078fSIntel */ 2115af75078fSIntel static int 2116af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 2117af75078fSIntel { 2118af75078fSIntel struct fwd_lcore *fwd_lc; 2119af75078fSIntel struct fwd_lcore tmp_lcore; 2120af75078fSIntel 2121af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 2122af75078fSIntel tmp_lcore = *fwd_lc; 2123af75078fSIntel tmp_lcore.stopped = 1; 2124af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 2125af75078fSIntel return 0; 2126af75078fSIntel } 2127af75078fSIntel 2128af75078fSIntel /* 2129af75078fSIntel * Launch packet forwarding: 2130af75078fSIntel * - Setup per-port forwarding context. 2131af75078fSIntel * - launch logical cores with their forwarding configuration. 2132af75078fSIntel */ 2133af75078fSIntel static void 2134af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 2135af75078fSIntel { 2136af75078fSIntel port_fwd_begin_t port_fwd_begin; 2137af75078fSIntel unsigned int i; 2138af75078fSIntel unsigned int lc_id; 2139af75078fSIntel int diag; 2140af75078fSIntel 2141af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 2142af75078fSIntel if (port_fwd_begin != NULL) { 2143af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2144af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 2145af75078fSIntel } 2146af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 2147af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 2148af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 2149af75078fSIntel fwd_lcores[i]->stopped = 0; 2150af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 2151af75078fSIntel fwd_lcores[i], lc_id); 2152af75078fSIntel if (diag != 0) 2153af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 2154af75078fSIntel lc_id, diag); 2155af75078fSIntel } 2156af75078fSIntel } 2157af75078fSIntel } 2158af75078fSIntel 2159af75078fSIntel /* 2160af75078fSIntel * Launch packet forwarding configuration. 
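 * A non-zero with_tx_first makes every forwarding port first transmit one
 * txonly burst per requested round, as the interactive "start tx_first"
 * command is assumed to request; sketch of the two entry points:
 *
 *	start_packet_forwarding(0);	// plain "start"
 *	start_packet_forwarding(1);	// "start tx_first"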
2161af75078fSIntel */ 2162af75078fSIntel void 2163af75078fSIntel start_packet_forwarding(int with_tx_first) 2164af75078fSIntel { 2165af75078fSIntel port_fwd_begin_t port_fwd_begin; 2166af75078fSIntel port_fwd_end_t port_fwd_end; 2167af75078fSIntel struct rte_port *port; 2168af75078fSIntel unsigned int i; 2169af75078fSIntel portid_t pt_id; 2170af75078fSIntel 21715a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 21725a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 21735a8fb55cSReshma Pattan 21745a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 21755a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 21765a8fb55cSReshma Pattan 21775a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 21785a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 21795a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 21805a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 21815a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 21825a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 21835a8fb55cSReshma Pattan 2184ce8d5614SIntel if (all_ports_started() == 0) { 2185ce8d5614SIntel printf("Not all ports were started\n"); 2186ce8d5614SIntel return; 2187ce8d5614SIntel } 2188af75078fSIntel if (test_done == 0) { 2189af75078fSIntel printf("Packet forwarding already started\n"); 2190af75078fSIntel return; 2191af75078fSIntel } 2192edf87b4aSBernard Iremonger 2193edf87b4aSBernard Iremonger 21947741e4cfSIntel if(dcb_test) { 21957741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) { 21967741e4cfSIntel pt_id = fwd_ports_ids[i]; 21977741e4cfSIntel port = &ports[pt_id]; 21987741e4cfSIntel if (!port->dcb_flag) { 21997741e4cfSIntel printf("In DCB mode, all forwarding ports must " 22007741e4cfSIntel "be configured in this mode.\n"); 2201013af9b6SIntel return; 2202013af9b6SIntel } 22037741e4cfSIntel } 22047741e4cfSIntel if (nb_fwd_lcores == 1) { 22057741e4cfSIntel printf("In DCB mode,the nb forwarding cores " 22067741e4cfSIntel "should be larger than 1.\n"); 22077741e4cfSIntel return; 22087741e4cfSIntel } 22097741e4cfSIntel } 2210af75078fSIntel test_done = 0; 22117741e4cfSIntel 221247a767b2SMatan Azrad fwd_config_setup(); 221347a767b2SMatan Azrad 22147741e4cfSIntel if(!no_flush_rx) 22157741e4cfSIntel flush_fwd_rx_queues(); 22167741e4cfSIntel 2217933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config); 2218af75078fSIntel rxtx_config_display(); 2219af75078fSIntel 222053324971SDavid Marchand fwd_stats_reset(); 2221af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2222af75078fSIntel pt_id = fwd_ports_ids[i]; 2223af75078fSIntel port = &ports[pt_id]; 2224013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port); 2225af75078fSIntel } 2226af75078fSIntel if (with_tx_first) { 2227af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 2228af75078fSIntel if (port_fwd_begin != NULL) { 2229af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 2230af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 2231af75078fSIntel } 2232acbf77a6SZhihong Wang while (with_tx_first--) { 2233acbf77a6SZhihong Wang launch_packet_forwarding( 2234acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 2235af75078fSIntel rte_eal_mp_wait_lcore(); 2236acbf77a6SZhihong Wang } 2237af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 2238af75078fSIntel if (port_fwd_end != NULL) { 2239af75078fSIntel for (i = 0; i < 
cur_fwd_config.nb_fwd_ports; i++) 2240af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 2241af75078fSIntel } 2242af75078fSIntel } 2243af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 2244af75078fSIntel } 2245af75078fSIntel 2246af75078fSIntel void 2247af75078fSIntel stop_packet_forwarding(void) 2248af75078fSIntel { 2249af75078fSIntel port_fwd_end_t port_fwd_end; 2250af75078fSIntel lcoreid_t lc_id; 225153324971SDavid Marchand portid_t pt_id; 225253324971SDavid Marchand int i; 2253af75078fSIntel 2254af75078fSIntel if (test_done) { 2255af75078fSIntel printf("Packet forwarding not started\n"); 2256af75078fSIntel return; 2257af75078fSIntel } 2258af75078fSIntel printf("Telling cores to stop..."); 2259af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 2260af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 2261af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 2262af75078fSIntel rte_eal_mp_wait_lcore(); 2263af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 2264af75078fSIntel if (port_fwd_end != NULL) { 2265af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 2266af75078fSIntel pt_id = fwd_ports_ids[i]; 2267af75078fSIntel (*port_fwd_end)(pt_id); 2268af75078fSIntel } 2269af75078fSIntel } 2270c185d42cSDavid Marchand 227153324971SDavid Marchand fwd_stats_display(); 227258d475b7SJerin Jacob 2273af75078fSIntel printf("\nDone.\n"); 2274af75078fSIntel test_done = 1; 2275af75078fSIntel } 2276af75078fSIntel 2277cfae07fdSOuyang Changchun void 2278cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 2279cfae07fdSOuyang Changchun { 2280492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 2281cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 2282cfae07fdSOuyang Changchun } 2283cfae07fdSOuyang Changchun 2284cfae07fdSOuyang Changchun void 2285cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 2286cfae07fdSOuyang Changchun { 2287492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 2288cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 2289cfae07fdSOuyang Changchun } 2290cfae07fdSOuyang Changchun 2291ce8d5614SIntel static int 2292ce8d5614SIntel all_ports_started(void) 2293ce8d5614SIntel { 2294ce8d5614SIntel portid_t pi; 2295ce8d5614SIntel struct rte_port *port; 2296ce8d5614SIntel 22977d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2298ce8d5614SIntel port = &ports[pi]; 2299ce8d5614SIntel /* Check if there is a port which is not started */ 230041b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 230141b05095SBernard Iremonger (port->slave_flag == 0)) 2302ce8d5614SIntel return 0; 2303ce8d5614SIntel } 2304ce8d5614SIntel 2305ce8d5614SIntel /* No port is not started */ 2306ce8d5614SIntel return 1; 2307ce8d5614SIntel } 2308ce8d5614SIntel 2309148f963fSBruce Richardson int 23106018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 23116018eb8cSShahaf Shuler { 23126018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 23136018eb8cSShahaf Shuler 23146018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 23156018eb8cSShahaf Shuler (port->slave_flag == 0)) 23166018eb8cSShahaf Shuler return 0; 23176018eb8cSShahaf Shuler return 1; 23186018eb8cSShahaf Shuler } 23196018eb8cSShahaf Shuler 23206018eb8cSShahaf Shuler int 2321edab33b1STetsuya Mukawa all_ports_stopped(void) 2322edab33b1STetsuya Mukawa { 2323edab33b1STetsuya Mukawa portid_t pi; 2324edab33b1STetsuya Mukawa 23257d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 23266018eb8cSShahaf Shuler if 
(!port_is_stopped(pi))
2327edab33b1STetsuya Mukawa return 0;
2328edab33b1STetsuya Mukawa }
2329edab33b1STetsuya Mukawa
2330edab33b1STetsuya Mukawa return 1;
2331edab33b1STetsuya Mukawa }
2332edab33b1STetsuya Mukawa
2333edab33b1STetsuya Mukawa int
2334edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2335edab33b1STetsuya Mukawa {
2336edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN))
2337edab33b1STetsuya Mukawa return 0;
2338edab33b1STetsuya Mukawa
2339edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED)
2340edab33b1STetsuya Mukawa return 0;
2341edab33b1STetsuya Mukawa
2342edab33b1STetsuya Mukawa return 1;
2343edab33b1STetsuya Mukawa }
2344edab33b1STetsuya Mukawa
23451c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
23461c69df45SOri Kam static int
23471c69df45SOri Kam setup_hairpin_queues(portid_t pi)
23481c69df45SOri Kam {
23491c69df45SOri Kam queueid_t qi;
23501c69df45SOri Kam struct rte_eth_hairpin_conf hairpin_conf = {
23511c69df45SOri Kam .peer_count = 1,
23521c69df45SOri Kam };
23531c69df45SOri Kam int i;
23541c69df45SOri Kam int diag;
23551c69df45SOri Kam struct rte_port *port = &ports[pi];
23561c69df45SOri Kam
23571c69df45SOri Kam for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
23581c69df45SOri Kam hairpin_conf.peers[0].port = pi;
23591c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_rxq;
23601c69df45SOri Kam diag = rte_eth_tx_hairpin_queue_setup
23611c69df45SOri Kam (pi, qi, nb_txd, &hairpin_conf);
23621c69df45SOri Kam i++;
23631c69df45SOri Kam if (diag == 0)
23641c69df45SOri Kam continue;
23651c69df45SOri Kam
23661c69df45SOri Kam /* Fail to set up Tx hairpin queue, return */
23671c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status),
23681c69df45SOri Kam RTE_PORT_HANDLING,
23691c69df45SOri Kam RTE_PORT_STOPPED) == 0)
23701c69df45SOri Kam printf("Port %d can not be set back "
23711c69df45SOri Kam "to stopped\n", pi);
23721c69df45SOri Kam printf("Fail to configure port %d hairpin "
23731c69df45SOri Kam "queues\n", pi);
23741c69df45SOri Kam /* try to reconfigure queues next time */
23751c69df45SOri Kam port->need_reconfig_queues = 1;
23761c69df45SOri Kam return -1;
23771c69df45SOri Kam }
23781c69df45SOri Kam for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
23791c69df45SOri Kam hairpin_conf.peers[0].port = pi;
23801c69df45SOri Kam hairpin_conf.peers[0].queue = i + nb_txq;
23811c69df45SOri Kam diag = rte_eth_rx_hairpin_queue_setup
23821c69df45SOri Kam (pi, qi, nb_rxd, &hairpin_conf);
23831c69df45SOri Kam i++;
23841c69df45SOri Kam if (diag == 0)
23851c69df45SOri Kam continue;
23861c69df45SOri Kam
23871c69df45SOri Kam /* Fail to set up Rx hairpin queue, return */
23881c69df45SOri Kam if (rte_atomic16_cmpset(&(port->port_status),
23891c69df45SOri Kam RTE_PORT_HANDLING,
23901c69df45SOri Kam RTE_PORT_STOPPED) == 0)
23911c69df45SOri Kam printf("Port %d can not be set back "
23921c69df45SOri Kam "to stopped\n", pi);
23931c69df45SOri Kam printf("Fail to configure port %d hairpin "
23941c69df45SOri Kam "queues\n", pi);
23951c69df45SOri Kam /* try to reconfigure queues next time */
23961c69df45SOri Kam port->need_reconfig_queues = 1;
23971c69df45SOri Kam return -1;
23981c69df45SOri Kam }
23991c69df45SOri Kam return 0;
24001c69df45SOri Kam }
24011c69df45SOri Kam
2402edab33b1STetsuya Mukawa int
2403ce8d5614SIntel start_port(portid_t pid)
2404ce8d5614SIntel {
240592d2703eSMichael Qiu int diag, need_check_link_status = -1;
2406ce8d5614SIntel portid_t pi;
2407ce8d5614SIntel queueid_t qi;
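	/*
	 * The body below follows the canonical ethdev bring-up order:
	 * rte_eth_dev_configure(), per-queue rte_eth_tx/rx_queue_setup(),
	 * optional hairpin setup, then rte_eth_dev_start(). A minimal
	 * free-standing sketch of that sequence (single queue pair, default
	 * queue configuration, pre-created mempool "mb_pool" assumed; error
	 * handling elided):
	 *
	 *	struct rte_eth_conf conf = { 0 };
	 *	rte_eth_dev_configure(pi, 1, 1, &conf);
	 *	rte_eth_tx_queue_setup(pi, 0, nb_txd, rte_eth_dev_socket_id(pi), NULL);
	 *	rte_eth_rx_queue_setup(pi, 0, nb_rxd, rte_eth_dev_socket_id(pi), NULL, mb_pool);
	 *	rte_eth_dev_start(pi);
	 */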
2408ce8d5614SIntel struct rte_port *port; 24096d13ea8eSOlivier Matz struct rte_ether_addr mac_addr; 24101c69df45SOri Kam struct rte_eth_hairpin_cap cap; 2411ce8d5614SIntel 24124468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 24134468635fSMichael Qiu return 0; 24144468635fSMichael Qiu 2415ce8d5614SIntel if(dcb_config) 2416ce8d5614SIntel dcb_test = 1; 24177d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 2418edab33b1STetsuya Mukawa if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2419ce8d5614SIntel continue; 2420ce8d5614SIntel 242192d2703eSMichael Qiu need_check_link_status = 0; 2422ce8d5614SIntel port = &ports[pi]; 2423ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, 2424ce8d5614SIntel RTE_PORT_HANDLING) == 0) { 2425ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 2426ce8d5614SIntel continue; 2427ce8d5614SIntel } 2428ce8d5614SIntel 2429ce8d5614SIntel if (port->need_reconfig > 0) { 2430ce8d5614SIntel port->need_reconfig = 0; 2431ce8d5614SIntel 24327ee3e944SVasily Philipov if (flow_isolate_all) { 24337ee3e944SVasily Philipov int ret = port_flow_isolate(pi, 1); 24347ee3e944SVasily Philipov if (ret) { 24357ee3e944SVasily Philipov printf("Failed to apply isolated" 24367ee3e944SVasily Philipov " mode on port %d\n", pi); 24377ee3e944SVasily Philipov return -1; 24387ee3e944SVasily Philipov } 24397ee3e944SVasily Philipov } 2440b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(0); 24415706de65SJulien Cretin printf("Configuring Port %d (socket %u)\n", pi, 244220a0286fSLiu Xiaofeng port->socket_id); 24431c69df45SOri Kam if (nb_hairpinq > 0 && 24441c69df45SOri Kam rte_eth_dev_hairpin_capability_get(pi, &cap)) { 24451c69df45SOri Kam printf("Port %d doesn't support hairpin " 24461c69df45SOri Kam "queues\n", pi); 24471c69df45SOri Kam return -1; 24481c69df45SOri Kam } 2449ce8d5614SIntel /* configure port */ 24501c69df45SOri Kam diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq, 24511c69df45SOri Kam nb_txq + nb_hairpinq, 2452ce8d5614SIntel &(port->dev_conf)); 2453ce8d5614SIntel if (diag != 0) { 2454ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2455ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2456ce8d5614SIntel printf("Port %d can not be set back " 2457ce8d5614SIntel "to stopped\n", pi); 2458ce8d5614SIntel printf("Fail to configure port %d\n", pi); 2459ce8d5614SIntel /* try to reconfigure port next time */ 2460ce8d5614SIntel port->need_reconfig = 1; 2461148f963fSBruce Richardson return -1; 2462ce8d5614SIntel } 2463ce8d5614SIntel } 2464ce8d5614SIntel if (port->need_reconfig_queues > 0) { 2465ce8d5614SIntel port->need_reconfig_queues = 0; 2466ce8d5614SIntel /* setup tx queues */ 2467ce8d5614SIntel for (qi = 0; qi < nb_txq; qi++) { 2468b6ea6408SIntel if ((numa_support) && 2469b6ea6408SIntel (txring_numa[pi] != NUMA_NO_CONFIG)) 2470b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2471d44f8a48SQi Zhang port->nb_tx_desc[qi], 2472d44f8a48SQi Zhang txring_numa[pi], 2473d44f8a48SQi Zhang &(port->tx_conf[qi])); 2474b6ea6408SIntel else 2475b6ea6408SIntel diag = rte_eth_tx_queue_setup(pi, qi, 2476d44f8a48SQi Zhang port->nb_tx_desc[qi], 2477d44f8a48SQi Zhang port->socket_id, 2478d44f8a48SQi Zhang &(port->tx_conf[qi])); 2479b6ea6408SIntel 2480ce8d5614SIntel if (diag == 0) 2481ce8d5614SIntel continue; 2482ce8d5614SIntel 2483ce8d5614SIntel /* Fail to setup tx queue, return */ 2484ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2485ce8d5614SIntel RTE_PORT_HANDLING, 2486ce8d5614SIntel RTE_PORT_STOPPED) == 0) 
2487ce8d5614SIntel printf("Port %d can not be set back " 2488ce8d5614SIntel "to stopped\n", pi); 2489d44f8a48SQi Zhang printf("Fail to configure port %d tx queues\n", 2490d44f8a48SQi Zhang pi); 2491ce8d5614SIntel /* try to reconfigure queues next time */ 2492ce8d5614SIntel port->need_reconfig_queues = 1; 2493148f963fSBruce Richardson return -1; 2494ce8d5614SIntel } 2495ce8d5614SIntel for (qi = 0; qi < nb_rxq; qi++) { 2496d44f8a48SQi Zhang /* setup rx queues */ 2497b6ea6408SIntel if ((numa_support) && 2498b6ea6408SIntel (rxring_numa[pi] != NUMA_NO_CONFIG)) { 2499b6ea6408SIntel struct rte_mempool * mp = 2500b6ea6408SIntel mbuf_pool_find(rxring_numa[pi]); 2501b6ea6408SIntel if (mp == NULL) { 2502b6ea6408SIntel printf("Failed to setup RX queue:" 2503b6ea6408SIntel "No mempool allocation" 2504b6ea6408SIntel " on the socket %d\n", 2505b6ea6408SIntel rxring_numa[pi]); 2506148f963fSBruce Richardson return -1; 2507b6ea6408SIntel } 2508b6ea6408SIntel 2509b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 2510d4930794SFerruh Yigit port->nb_rx_desc[qi], 2511d44f8a48SQi Zhang rxring_numa[pi], 2512d44f8a48SQi Zhang &(port->rx_conf[qi]), 2513d44f8a48SQi Zhang mp); 25141e1d6bddSBernard Iremonger } else { 25151e1d6bddSBernard Iremonger struct rte_mempool *mp = 25161e1d6bddSBernard Iremonger mbuf_pool_find(port->socket_id); 25171e1d6bddSBernard Iremonger if (mp == NULL) { 25181e1d6bddSBernard Iremonger printf("Failed to setup RX queue:" 25191e1d6bddSBernard Iremonger "No mempool allocation" 25201e1d6bddSBernard Iremonger " on the socket %d\n", 25211e1d6bddSBernard Iremonger port->socket_id); 25221e1d6bddSBernard Iremonger return -1; 2523b6ea6408SIntel } 2524b6ea6408SIntel diag = rte_eth_rx_queue_setup(pi, qi, 2525d4930794SFerruh Yigit port->nb_rx_desc[qi], 2526d44f8a48SQi Zhang port->socket_id, 2527d44f8a48SQi Zhang &(port->rx_conf[qi]), 2528d44f8a48SQi Zhang mp); 25291e1d6bddSBernard Iremonger } 2530ce8d5614SIntel if (diag == 0) 2531ce8d5614SIntel continue; 2532ce8d5614SIntel 2533ce8d5614SIntel /* Fail to setup rx queue, return */ 2534ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2535ce8d5614SIntel RTE_PORT_HANDLING, 2536ce8d5614SIntel RTE_PORT_STOPPED) == 0) 2537ce8d5614SIntel printf("Port %d can not be set back " 2538ce8d5614SIntel "to stopped\n", pi); 2539d44f8a48SQi Zhang printf("Fail to configure port %d rx queues\n", 2540d44f8a48SQi Zhang pi); 2541ce8d5614SIntel /* try to reconfigure queues next time */ 2542ce8d5614SIntel port->need_reconfig_queues = 1; 2543148f963fSBruce Richardson return -1; 2544ce8d5614SIntel } 25451c69df45SOri Kam /* setup hairpin queues */ 25461c69df45SOri Kam if (setup_hairpin_queues(pi) != 0) 25471c69df45SOri Kam return -1; 2548ce8d5614SIntel } 2549b5b38ed8SRaslan Darawsheh configure_rxtx_dump_callbacks(verbose_level); 2550b0a9354aSPavan Nikhilesh if (clear_ptypes) { 2551b0a9354aSPavan Nikhilesh diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, 2552b0a9354aSPavan Nikhilesh NULL, 0); 2553b0a9354aSPavan Nikhilesh if (diag < 0) 2554b0a9354aSPavan Nikhilesh printf( 2555b0a9354aSPavan Nikhilesh "Port %d: Failed to disable Ptype parsing\n", 2556b0a9354aSPavan Nikhilesh pi); 2557b0a9354aSPavan Nikhilesh } 2558b0a9354aSPavan Nikhilesh 2559ce8d5614SIntel /* start port */ 2560ce8d5614SIntel if (rte_eth_dev_start(pi) < 0) { 2561ce8d5614SIntel printf("Fail to start port %d\n", pi); 2562ce8d5614SIntel 2563ce8d5614SIntel /* Fail to setup rx queue, return */ 2564ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2565ce8d5614SIntel RTE_PORT_HANDLING, 
RTE_PORT_STOPPED) == 0) 2566ce8d5614SIntel printf("Port %d can not be set back to " 2567ce8d5614SIntel "stopped\n", pi); 2568ce8d5614SIntel continue; 2569ce8d5614SIntel } 2570ce8d5614SIntel 2571ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2572ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) 2573ce8d5614SIntel printf("Port %d can not be set into started\n", pi); 2574ce8d5614SIntel 2575a5279d25SIgor Romanov if (eth_macaddr_get_print_err(pi, &mac_addr) == 0) 2576d8c89163SZijie Pan printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, 25772950a769SDeclan Doherty mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], 25782950a769SDeclan Doherty mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], 25792950a769SDeclan Doherty mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); 2580d8c89163SZijie Pan 2581ce8d5614SIntel /* at least one port started, need checking link status */ 2582ce8d5614SIntel need_check_link_status = 1; 2583ce8d5614SIntel } 2584ce8d5614SIntel 258592d2703eSMichael Qiu if (need_check_link_status == 1 && !no_link_check) 2586edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 258792d2703eSMichael Qiu else if (need_check_link_status == 0) 2588ce8d5614SIntel printf("Please stop the ports first\n"); 2589ce8d5614SIntel 2590ce8d5614SIntel printf("Done\n"); 2591148f963fSBruce Richardson return 0; 2592ce8d5614SIntel } 2593ce8d5614SIntel 2594ce8d5614SIntel void 2595ce8d5614SIntel stop_port(portid_t pid) 2596ce8d5614SIntel { 2597ce8d5614SIntel portid_t pi; 2598ce8d5614SIntel struct rte_port *port; 2599ce8d5614SIntel int need_check_link_status = 0; 2600ce8d5614SIntel 2601ce8d5614SIntel if (dcb_test) { 2602ce8d5614SIntel dcb_test = 0; 2603ce8d5614SIntel dcb_config = 0; 2604ce8d5614SIntel } 26054468635fSMichael Qiu 26064468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 26074468635fSMichael Qiu return; 26084468635fSMichael Qiu 2609ce8d5614SIntel printf("Stopping ports...\n"); 2610ce8d5614SIntel 26117d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 26124468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2613ce8d5614SIntel continue; 2614ce8d5614SIntel 2615a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 2616a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 2617a8ef3e3aSBernard Iremonger continue; 2618a8ef3e3aSBernard Iremonger } 2619a8ef3e3aSBernard Iremonger 26200e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 26210e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 26220e545d30SBernard Iremonger continue; 26230e545d30SBernard Iremonger } 26240e545d30SBernard Iremonger 2625ce8d5614SIntel port = &ports[pi]; 2626ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, 2627ce8d5614SIntel RTE_PORT_HANDLING) == 0) 2628ce8d5614SIntel continue; 2629ce8d5614SIntel 2630ce8d5614SIntel rte_eth_dev_stop(pi); 2631ce8d5614SIntel 2632ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2633ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) 2634ce8d5614SIntel printf("Port %d can not be set into stopped\n", pi); 2635ce8d5614SIntel need_check_link_status = 1; 2636ce8d5614SIntel } 2637bc202406SDavid Marchand if (need_check_link_status && !no_link_check) 2638edab33b1STetsuya Mukawa check_all_ports_link_status(RTE_PORT_ALL); 2639ce8d5614SIntel 2640ce8d5614SIntel printf("Done\n"); 2641ce8d5614SIntel } 2642ce8d5614SIntel 2643ce6959bfSWisam Jaddo static void 26444f1de450SThomas Monjalon 
remove_invalid_ports_in(portid_t *array, portid_t *total) 2645ce6959bfSWisam Jaddo { 26464f1de450SThomas Monjalon portid_t i; 26474f1de450SThomas Monjalon portid_t new_total = 0; 2648ce6959bfSWisam Jaddo 26494f1de450SThomas Monjalon for (i = 0; i < *total; i++) 26504f1de450SThomas Monjalon if (!port_id_is_invalid(array[i], DISABLED_WARN)) { 26514f1de450SThomas Monjalon array[new_total] = array[i]; 26524f1de450SThomas Monjalon new_total++; 2653ce6959bfSWisam Jaddo } 26544f1de450SThomas Monjalon *total = new_total; 26554f1de450SThomas Monjalon } 26564f1de450SThomas Monjalon 26574f1de450SThomas Monjalon static void 26584f1de450SThomas Monjalon remove_invalid_ports(void) 26594f1de450SThomas Monjalon { 26604f1de450SThomas Monjalon remove_invalid_ports_in(ports_ids, &nb_ports); 26614f1de450SThomas Monjalon remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); 26624f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 2663ce6959bfSWisam Jaddo } 2664ce6959bfSWisam Jaddo 2665ce8d5614SIntel void 2666ce8d5614SIntel close_port(portid_t pid) 2667ce8d5614SIntel { 2668ce8d5614SIntel portid_t pi; 2669ce8d5614SIntel struct rte_port *port; 2670ce8d5614SIntel 26714468635fSMichael Qiu if (port_id_is_invalid(pid, ENABLED_WARN)) 26724468635fSMichael Qiu return; 26734468635fSMichael Qiu 2674ce8d5614SIntel printf("Closing ports...\n"); 2675ce8d5614SIntel 26767d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 26774468635fSMichael Qiu if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 2678ce8d5614SIntel continue; 2679ce8d5614SIntel 2680a8ef3e3aSBernard Iremonger if (port_is_forwarding(pi) != 0 && test_done == 0) { 2681a8ef3e3aSBernard Iremonger printf("Please remove port %d from forwarding configuration.\n", pi); 2682a8ef3e3aSBernard Iremonger continue; 2683a8ef3e3aSBernard Iremonger } 2684a8ef3e3aSBernard Iremonger 26850e545d30SBernard Iremonger if (port_is_bonding_slave(pi)) { 26860e545d30SBernard Iremonger printf("Please remove port %d from bonded device.\n", pi); 26870e545d30SBernard Iremonger continue; 26880e545d30SBernard Iremonger } 26890e545d30SBernard Iremonger 2690ce8d5614SIntel port = &ports[pi]; 2691ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2692d4e8ad64SMichael Qiu RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { 2693d4e8ad64SMichael Qiu printf("Port %d is already closed\n", pi); 2694d4e8ad64SMichael Qiu continue; 2695d4e8ad64SMichael Qiu } 2696d4e8ad64SMichael Qiu 2697d4e8ad64SMichael Qiu if (rte_atomic16_cmpset(&(port->port_status), 2698ce8d5614SIntel RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { 2699ce8d5614SIntel printf("Port %d is now not stopped\n", pi); 2700ce8d5614SIntel continue; 2701ce8d5614SIntel } 2702ce8d5614SIntel 2703938a184aSAdrien Mazarguil if (port->flow_list) 2704938a184aSAdrien Mazarguil port_flow_flush(pi); 2705ce8d5614SIntel rte_eth_dev_close(pi); 2706ce8d5614SIntel 27074f1de450SThomas Monjalon remove_invalid_ports(); 270823ea57a2SThomas Monjalon 2709ce8d5614SIntel if (rte_atomic16_cmpset(&(port->port_status), 2710ce8d5614SIntel RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) 2711b38bb262SPablo de Lara printf("Port %d cannot be set to closed\n", pi); 2712ce8d5614SIntel } 2713ce8d5614SIntel 2714ce8d5614SIntel printf("Done\n"); 2715ce8d5614SIntel } 2716ce8d5614SIntel 2717edab33b1STetsuya Mukawa void 271897f1e196SWei Dai reset_port(portid_t pid) 271997f1e196SWei Dai { 272097f1e196SWei Dai int diag; 272197f1e196SWei Dai portid_t pi; 272297f1e196SWei Dai struct rte_port *port; 272397f1e196SWei Dai 272497f1e196SWei Dai if (port_id_is_invalid(pid, ENABLED_WARN)) 272597f1e196SWei Dai 
return; 272697f1e196SWei Dai 27271cde1b9aSShougang Wang if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || 27281cde1b9aSShougang Wang (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { 27291cde1b9aSShougang Wang printf("Can not reset port(s), please stop port(s) first.\n"); 27301cde1b9aSShougang Wang return; 27311cde1b9aSShougang Wang } 27321cde1b9aSShougang Wang 273397f1e196SWei Dai printf("Resetting ports...\n"); 273497f1e196SWei Dai 273597f1e196SWei Dai RTE_ETH_FOREACH_DEV(pi) { 273697f1e196SWei Dai if (pid != pi && pid != (portid_t)RTE_PORT_ALL) 273797f1e196SWei Dai continue; 273897f1e196SWei Dai 273997f1e196SWei Dai if (port_is_forwarding(pi) != 0 && test_done == 0) { 274097f1e196SWei Dai printf("Please remove port %d from forwarding " 274197f1e196SWei Dai "configuration.\n", pi); 274297f1e196SWei Dai continue; 274397f1e196SWei Dai } 274497f1e196SWei Dai 274597f1e196SWei Dai if (port_is_bonding_slave(pi)) { 274697f1e196SWei Dai printf("Please remove port %d from bonded device.\n", 274797f1e196SWei Dai pi); 274897f1e196SWei Dai continue; 274997f1e196SWei Dai } 275097f1e196SWei Dai 275197f1e196SWei Dai diag = rte_eth_dev_reset(pi); 275297f1e196SWei Dai if (diag == 0) { 275397f1e196SWei Dai port = &ports[pi]; 275497f1e196SWei Dai port->need_reconfig = 1; 275597f1e196SWei Dai port->need_reconfig_queues = 1; 275697f1e196SWei Dai } else { 275797f1e196SWei Dai printf("Failed to reset port %d. diag=%d\n", pi, diag); 275897f1e196SWei Dai } 275997f1e196SWei Dai } 276097f1e196SWei Dai 276197f1e196SWei Dai printf("Done\n"); 276297f1e196SWei Dai } 276397f1e196SWei Dai 276497f1e196SWei Dai void 2765edab33b1STetsuya Mukawa attach_port(char *identifier) 2766ce8d5614SIntel { 27674f1ed78eSThomas Monjalon portid_t pi; 2768c9cce428SThomas Monjalon struct rte_dev_iterator iterator; 2769ce8d5614SIntel 2770edab33b1STetsuya Mukawa printf("Attaching a new port...\n"); 2771edab33b1STetsuya Mukawa 2772edab33b1STetsuya Mukawa if (identifier == NULL) { 2773edab33b1STetsuya Mukawa printf("Invalid parameters are specified\n"); 2774edab33b1STetsuya Mukawa return; 2775ce8d5614SIntel } 2776ce8d5614SIntel 277775b66decSIlya Maximets if (rte_dev_probe(identifier) < 0) { 2778c9cce428SThomas Monjalon TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); 2779edab33b1STetsuya Mukawa return; 2780c9cce428SThomas Monjalon } 2781c9cce428SThomas Monjalon 27824f1ed78eSThomas Monjalon /* first attach mode: event */ 27834f1ed78eSThomas Monjalon if (setup_on_probe_event) { 27844f1ed78eSThomas Monjalon /* new ports are detected on RTE_ETH_EVENT_NEW event */ 27854f1ed78eSThomas Monjalon for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) 27864f1ed78eSThomas Monjalon if (ports[pi].port_status == RTE_PORT_HANDLING && 27874f1ed78eSThomas Monjalon ports[pi].need_setup != 0) 27884f1ed78eSThomas Monjalon setup_attached_port(pi); 27894f1ed78eSThomas Monjalon return; 27904f1ed78eSThomas Monjalon } 27914f1ed78eSThomas Monjalon 27924f1ed78eSThomas Monjalon /* second attach mode: iterator */ 279386fa5de1SThomas Monjalon RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { 27944f1ed78eSThomas Monjalon /* setup ports matching the devargs used for probing */ 279586fa5de1SThomas Monjalon if (port_is_forwarding(pi)) 279686fa5de1SThomas Monjalon continue; /* port was already attached before */ 2797c9cce428SThomas Monjalon setup_attached_port(pi); 2798c9cce428SThomas Monjalon } 279986fa5de1SThomas Monjalon } 2800c9cce428SThomas Monjalon 2801c9cce428SThomas Monjalon static void 2802c9cce428SThomas Monjalon 
setup_attached_port(portid_t pi) 2803c9cce428SThomas Monjalon { 2804c9cce428SThomas Monjalon unsigned int socket_id; 280534fc1051SIvan Ilchenko int ret; 2806edab33b1STetsuya Mukawa 2807931126baSBernard Iremonger socket_id = (unsigned)rte_eth_dev_socket_id(pi); 280829841336SPhil Yang /* if socket_id is invalid, set to the first available socket. */ 2809931126baSBernard Iremonger if (check_socket_id(socket_id) < 0) 281029841336SPhil Yang socket_id = socket_ids[0]; 2811931126baSBernard Iremonger reconfig(pi, socket_id); 281234fc1051SIvan Ilchenko ret = rte_eth_promiscuous_enable(pi); 281334fc1051SIvan Ilchenko if (ret != 0) 281434fc1051SIvan Ilchenko printf("Error during enabling promiscuous mode for port %u: %s - ignore\n", 281534fc1051SIvan Ilchenko pi, rte_strerror(-ret)); 2816edab33b1STetsuya Mukawa 28174f1de450SThomas Monjalon ports_ids[nb_ports++] = pi; 28184f1de450SThomas Monjalon fwd_ports_ids[nb_fwd_ports++] = pi; 28194f1de450SThomas Monjalon nb_cfg_ports = nb_fwd_ports; 28204f1ed78eSThomas Monjalon ports[pi].need_setup = 0; 2821edab33b1STetsuya Mukawa ports[pi].port_status = RTE_PORT_STOPPED; 2822edab33b1STetsuya Mukawa 2823edab33b1STetsuya Mukawa printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); 2824edab33b1STetsuya Mukawa printf("Done\n"); 2825edab33b1STetsuya Mukawa } 2826edab33b1STetsuya Mukawa 28270654d4a8SThomas Monjalon static void 28280654d4a8SThomas Monjalon detach_device(struct rte_device *dev) 28295f4ec54fSChen Jing D(Mark) { 2830f8e5baa2SThomas Monjalon portid_t sibling; 2831f8e5baa2SThomas Monjalon 2832f8e5baa2SThomas Monjalon if (dev == NULL) { 2833f8e5baa2SThomas Monjalon printf("Device already removed\n"); 2834f8e5baa2SThomas Monjalon return; 2835f8e5baa2SThomas Monjalon } 2836f8e5baa2SThomas Monjalon 28370654d4a8SThomas Monjalon printf("Removing a device...\n"); 2838938a184aSAdrien Mazarguil 283975b66decSIlya Maximets if (rte_dev_remove(dev) < 0) { 2840f8e5baa2SThomas Monjalon TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); 2841edab33b1STetsuya Mukawa return; 28423070419eSGaetan Rivet } 28437ca262b8SViacheslav Ovsiienko RTE_ETH_FOREACH_DEV_OF(sibling, dev) { 2844f8e5baa2SThomas Monjalon /* reset mapping between old ports and removed device */ 2845f8e5baa2SThomas Monjalon rte_eth_devices[sibling].device = NULL; 2846f8e5baa2SThomas Monjalon if (ports[sibling].port_status != RTE_PORT_CLOSED) { 2847f8e5baa2SThomas Monjalon /* sibling ports are forced to be closed */ 2848f8e5baa2SThomas Monjalon ports[sibling].port_status = RTE_PORT_CLOSED; 2849f8e5baa2SThomas Monjalon printf("Port %u is closed\n", sibling); 2850f8e5baa2SThomas Monjalon } 2851f8e5baa2SThomas Monjalon } 2852f8e5baa2SThomas Monjalon 28534f1de450SThomas Monjalon remove_invalid_ports(); 285403ce2c53SMatan Azrad 28550654d4a8SThomas Monjalon printf("Device is detached\n"); 2856f8e5baa2SThomas Monjalon printf("Now total ports is %d\n", nb_ports); 2857edab33b1STetsuya Mukawa printf("Done\n"); 2858edab33b1STetsuya Mukawa return; 28595f4ec54fSChen Jing D(Mark) } 28605f4ec54fSChen Jing D(Mark) 2861af75078fSIntel void 28620654d4a8SThomas Monjalon detach_port_device(portid_t port_id) 28630654d4a8SThomas Monjalon { 28640654d4a8SThomas Monjalon if (port_id_is_invalid(port_id, ENABLED_WARN)) 28650654d4a8SThomas Monjalon return; 28660654d4a8SThomas Monjalon 28670654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_CLOSED) { 28680654d4a8SThomas Monjalon if (ports[port_id].port_status != RTE_PORT_STOPPED) { 28690654d4a8SThomas Monjalon printf("Port not stopped\n"); 
28700654d4a8SThomas Monjalon return; 28710654d4a8SThomas Monjalon } 28720654d4a8SThomas Monjalon printf("Port was not closed\n"); 28730654d4a8SThomas Monjalon if (ports[port_id].flow_list) 28740654d4a8SThomas Monjalon port_flow_flush(port_id); 28750654d4a8SThomas Monjalon } 28760654d4a8SThomas Monjalon 28770654d4a8SThomas Monjalon detach_device(rte_eth_devices[port_id].device); 28780654d4a8SThomas Monjalon } 28790654d4a8SThomas Monjalon 28800654d4a8SThomas Monjalon void 28815edee5f6SThomas Monjalon detach_devargs(char *identifier) 288255e51c96SNithin Dabilpuram { 288355e51c96SNithin Dabilpuram struct rte_dev_iterator iterator; 288455e51c96SNithin Dabilpuram struct rte_devargs da; 288555e51c96SNithin Dabilpuram portid_t port_id; 288655e51c96SNithin Dabilpuram 288755e51c96SNithin Dabilpuram printf("Removing a device...\n"); 288855e51c96SNithin Dabilpuram 288955e51c96SNithin Dabilpuram memset(&da, 0, sizeof(da)); 289055e51c96SNithin Dabilpuram if (rte_devargs_parsef(&da, "%s", identifier)) { 289155e51c96SNithin Dabilpuram printf("cannot parse identifier\n"); 289255e51c96SNithin Dabilpuram if (da.args) 289355e51c96SNithin Dabilpuram free(da.args); 289455e51c96SNithin Dabilpuram return; 289555e51c96SNithin Dabilpuram } 289655e51c96SNithin Dabilpuram 289755e51c96SNithin Dabilpuram RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { 289855e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_CLOSED) { 289955e51c96SNithin Dabilpuram if (ports[port_id].port_status != RTE_PORT_STOPPED) { 290055e51c96SNithin Dabilpuram printf("Port %u not stopped\n", port_id); 2901149677c9SStephen Hemminger rte_eth_iterator_cleanup(&iterator); 290255e51c96SNithin Dabilpuram return; 290355e51c96SNithin Dabilpuram } 290455e51c96SNithin Dabilpuram 290555e51c96SNithin Dabilpuram /* sibling ports are forced to be closed */ 290655e51c96SNithin Dabilpuram if (ports[port_id].flow_list) 290755e51c96SNithin Dabilpuram port_flow_flush(port_id); 290855e51c96SNithin Dabilpuram ports[port_id].port_status = RTE_PORT_CLOSED; 290955e51c96SNithin Dabilpuram printf("Port %u is now closed\n", port_id); 291055e51c96SNithin Dabilpuram } 291155e51c96SNithin Dabilpuram } 291255e51c96SNithin Dabilpuram 291355e51c96SNithin Dabilpuram if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { 291455e51c96SNithin Dabilpuram TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", 291555e51c96SNithin Dabilpuram da.name, da.bus->name); 291655e51c96SNithin Dabilpuram return; 291755e51c96SNithin Dabilpuram } 291855e51c96SNithin Dabilpuram 291955e51c96SNithin Dabilpuram remove_invalid_ports(); 292055e51c96SNithin Dabilpuram 292155e51c96SNithin Dabilpuram printf("Device %s is detached\n", identifier); 292255e51c96SNithin Dabilpuram printf("Now total ports is %d\n", nb_ports); 292355e51c96SNithin Dabilpuram printf("Done\n"); 292455e51c96SNithin Dabilpuram } 292555e51c96SNithin Dabilpuram 292655e51c96SNithin Dabilpuram void 2927af75078fSIntel pmd_test_exit(void) 2928af75078fSIntel { 2929af75078fSIntel portid_t pt_id; 2930fb73e096SJeff Guo int ret; 2931401b744dSShahaf Shuler int i; 2932af75078fSIntel 29338210ec25SPablo de Lara if (test_done == 0) 29348210ec25SPablo de Lara stop_packet_forwarding(); 29358210ec25SPablo de Lara 29363a0968c8SShahaf Shuler for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { 29373a0968c8SShahaf Shuler if (mempools[i]) { 29383a0968c8SShahaf Shuler if (mp_alloc_type == MP_ALLOC_ANON) 29393a0968c8SShahaf Shuler rte_mempool_mem_iter(mempools[i], dma_unmap_cb, 29403a0968c8SShahaf Shuler NULL); 29413a0968c8SShahaf 
Shuler } 29423a0968c8SShahaf Shuler } 2943d3a274ceSZhihong Wang if (ports != NULL) { 2944d3a274ceSZhihong Wang no_link_check = 1; 29457d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pt_id) { 294608fd782bSCristian Dumitrescu printf("\nStopping port %d...\n", pt_id); 2947af75078fSIntel fflush(stdout); 2948d3a274ceSZhihong Wang stop_port(pt_id); 294908fd782bSCristian Dumitrescu } 295008fd782bSCristian Dumitrescu RTE_ETH_FOREACH_DEV(pt_id) { 295108fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 295208fd782bSCristian Dumitrescu fflush(stdout); 2953d3a274ceSZhihong Wang close_port(pt_id); 2954af75078fSIntel } 2955d3a274ceSZhihong Wang } 2956fb73e096SJeff Guo 2957fb73e096SJeff Guo if (hot_plug) { 2958fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 29592049c511SJeff Guo if (ret) { 2960fb73e096SJeff Guo RTE_LOG(ERR, EAL, 2961fb73e096SJeff Guo "fail to stop device event monitor."); 29622049c511SJeff Guo return; 29632049c511SJeff Guo } 2964fb73e096SJeff Guo 29652049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 2966cc1bf307SJeff Guo dev_event_callback, NULL); 29672049c511SJeff Guo if (ret < 0) { 2968fb73e096SJeff Guo RTE_LOG(ERR, EAL, 29692049c511SJeff Guo "fail to unregister device event callback.\n"); 29702049c511SJeff Guo return; 29712049c511SJeff Guo } 29722049c511SJeff Guo 29732049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 29742049c511SJeff Guo if (ret) { 29752049c511SJeff Guo RTE_LOG(ERR, EAL, 29762049c511SJeff Guo "fail to disable hotplug handling.\n"); 29772049c511SJeff Guo return; 29782049c511SJeff Guo } 2979fb73e096SJeff Guo } 2980401b744dSShahaf Shuler for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { 2981401b744dSShahaf Shuler if (mempools[i]) 2982401b744dSShahaf Shuler rte_mempool_free(mempools[i]); 2983401b744dSShahaf Shuler } 2984fb73e096SJeff Guo 2985d3a274ceSZhihong Wang printf("\nBye...\n"); 2986af75078fSIntel } 2987af75078fSIntel 2988af75078fSIntel typedef void (*cmd_func_t)(void); 2989af75078fSIntel struct pmd_test_command { 2990af75078fSIntel const char *cmd_name; 2991af75078fSIntel cmd_func_t cmd_func; 2992af75078fSIntel }; 2993af75078fSIntel 2994ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 2995af75078fSIntel static void 2996edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask) 2997af75078fSIntel { 2998ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 2999ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 3000f8244c63SZhiyong Yang portid_t portid; 3001f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 3002ce8d5614SIntel struct rte_eth_link link; 3003e661a08bSIgor Romanov int ret; 3004ce8d5614SIntel 3005ce8d5614SIntel printf("Checking link statuses...\n"); 3006ce8d5614SIntel fflush(stdout); 3007ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 3008ce8d5614SIntel all_ports_up = 1; 30097d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 3010ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 3011ce8d5614SIntel continue; 3012ce8d5614SIntel memset(&link, 0, sizeof(link)); 3013e661a08bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 3014e661a08bSIgor Romanov if (ret < 0) { 3015e661a08bSIgor Romanov all_ports_up = 0; 3016e661a08bSIgor Romanov if (print_flag == 1) 3017e661a08bSIgor Romanov printf("Port %u link get failed: %s\n", 3018e661a08bSIgor Romanov portid, rte_strerror(-ret)); 3019e661a08bSIgor Romanov continue; 3020e661a08bSIgor Romanov } 3021ce8d5614SIntel /* print link status if flag set */ 
3022ce8d5614SIntel 			if (print_flag == 1) {
3023ce8d5614SIntel 				if (link.link_status)
3024f8244c63SZhiyong Yang 					printf(
3025f8244c63SZhiyong Yang 					"Port %d Link Up. speed %u Mbps - %s\n",
3026f8244c63SZhiyong Yang 						portid, link.link_speed,
3027ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
3028a357d09dSIvan Dyukov 					("full-duplex") : ("half-duplex"));
3029ce8d5614SIntel 				else
3030f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
3031ce8d5614SIntel 				continue;
3032ce8d5614SIntel 			}
3033ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
303409419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
3035ce8d5614SIntel 				all_ports_up = 0;
3036ce8d5614SIntel 				break;
3037ce8d5614SIntel 			}
3038ce8d5614SIntel 		}
3039ce8d5614SIntel 		/* after finally printing all link status, get out */
3040ce8d5614SIntel 		if (print_flag == 1)
3041ce8d5614SIntel 			break;
3042ce8d5614SIntel 
3043ce8d5614SIntel 		if (all_ports_up == 0) {
3044ce8d5614SIntel 			fflush(stdout);
3045ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3046ce8d5614SIntel 		}
3047ce8d5614SIntel 
3048ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3049ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3050ce8d5614SIntel 			print_flag = 1;
3051ce8d5614SIntel 		}
30528ea656f8SGaetan Rivet 
30538ea656f8SGaetan Rivet 		if (lsc_interrupt)
30548ea656f8SGaetan Rivet 			break;
3055ce8d5614SIntel 	}
3056af75078fSIntel }
3057af75078fSIntel 
3058cc1bf307SJeff Guo /*
3059cc1bf307SJeff Guo  * This callback removes a single port of a device. It is limited in that
3060cc1bf307SJeff Guo  * it cannot handle the removal of multiple ports of one device.
3061cc1bf307SJeff Guo  * TODO: the device detach invocation is planned to move from the user side
3062cc1bf307SJeff Guo  * into the EAL, converting all PMDs to free port resources on ethdev close.
3063cc1bf307SJeff Guo */ 3064284c908cSGaetan Rivet static void 3065cc1bf307SJeff Guo rmv_port_callback(void *arg) 3066284c908cSGaetan Rivet { 30673b97888aSMatan Azrad int need_to_start = 0; 30680da2a62bSMatan Azrad int org_no_link_check = no_link_check; 306928caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 30700654d4a8SThomas Monjalon struct rte_device *dev; 3071284c908cSGaetan Rivet 3072284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 3073284c908cSGaetan Rivet 30743b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 30753b97888aSMatan Azrad need_to_start = 1; 30763b97888aSMatan Azrad stop_packet_forwarding(); 30773b97888aSMatan Azrad } 30780da2a62bSMatan Azrad no_link_check = 1; 3079284c908cSGaetan Rivet stop_port(port_id); 30800da2a62bSMatan Azrad no_link_check = org_no_link_check; 30810654d4a8SThomas Monjalon 30820654d4a8SThomas Monjalon /* Save rte_device pointer before closing ethdev port */ 30830654d4a8SThomas Monjalon dev = rte_eth_devices[port_id].device; 3084284c908cSGaetan Rivet close_port(port_id); 30850654d4a8SThomas Monjalon detach_device(dev); /* might be already removed or have more ports */ 30860654d4a8SThomas Monjalon 30873b97888aSMatan Azrad if (need_to_start) 30883b97888aSMatan Azrad start_packet_forwarding(0); 3089284c908cSGaetan Rivet } 3090284c908cSGaetan Rivet 309176ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 3092d6af1a13SBernard Iremonger static int 3093f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 3094d6af1a13SBernard Iremonger void *ret_param) 309576ad4a2dSGaetan Rivet { 309676ad4a2dSGaetan Rivet RTE_SET_USED(param); 3097d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 309876ad4a2dSGaetan Rivet 309976ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 3100f431e010SHerakliusz Lipiec fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 310176ad4a2dSGaetan Rivet port_id, __func__, type); 310276ad4a2dSGaetan Rivet fflush(stderr); 31033af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 3104f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 310597b5d8b5SThomas Monjalon eth_event_desc[type]); 310676ad4a2dSGaetan Rivet fflush(stdout); 310776ad4a2dSGaetan Rivet } 3108284c908cSGaetan Rivet 3109284c908cSGaetan Rivet switch (type) { 31104f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 31114f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 31124f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 31134f1ed78eSThomas Monjalon break; 3114284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 31154f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 31164f1ed78eSThomas Monjalon break; 3117284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 3118cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 3119284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 3120284c908cSGaetan Rivet break; 3121284c908cSGaetan Rivet default: 3122284c908cSGaetan Rivet break; 3123284c908cSGaetan Rivet } 3124d6af1a13SBernard Iremonger return 0; 312576ad4a2dSGaetan Rivet } 312676ad4a2dSGaetan Rivet 312797b5d8b5SThomas Monjalon static int 312897b5d8b5SThomas Monjalon register_eth_event_callback(void) 312997b5d8b5SThomas Monjalon { 313097b5d8b5SThomas Monjalon int ret; 313197b5d8b5SThomas Monjalon enum rte_eth_event_type event; 313297b5d8b5SThomas Monjalon 313397b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 
313497b5d8b5SThomas Monjalon 	     event < RTE_ETH_EVENT_MAX; event++) {
313597b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
313697b5d8b5SThomas Monjalon 				event,
313797b5d8b5SThomas Monjalon 				eth_event_callback,
313897b5d8b5SThomas Monjalon 				NULL);
313997b5d8b5SThomas Monjalon 		if (ret != 0) {
314097b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
314197b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
314297b5d8b5SThomas Monjalon 			return -1;
314397b5d8b5SThomas Monjalon 		}
314497b5d8b5SThomas Monjalon 	}
314597b5d8b5SThomas Monjalon 
314697b5d8b5SThomas Monjalon 	return 0;
314797b5d8b5SThomas Monjalon }
314897b5d8b5SThomas Monjalon 
3149fb73e096SJeff Guo /* This function is used by the interrupt thread */
3150fb73e096SJeff Guo static void
3151cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3152fb73e096SJeff Guo 			     __rte_unused void *arg)
3153fb73e096SJeff Guo {
31542049c511SJeff Guo 	uint16_t port_id;
31552049c511SJeff Guo 	int ret;
31562049c511SJeff Guo 
3157fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3158fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3159fb73e096SJeff Guo 			__func__, type);
3160fb73e096SJeff Guo 		fflush(stderr);
3161fb73e096SJeff Guo 	}
3162fb73e096SJeff Guo 
3163fb73e096SJeff Guo 	switch (type) {
3164fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3165cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3166fb73e096SJeff Guo 			device_name);
31672049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
31682049c511SJeff Guo 		if (ret) {
31692049c511SJeff Guo 			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
31702049c511SJeff Guo 				device_name);
31712049c511SJeff Guo 			return;
31722049c511SJeff Guo 		}
3173cc1bf307SJeff Guo 		/*
3174cc1bf307SJeff Guo 		 * Because the user's callback is invoked from the EAL
3175cc1bf307SJeff Guo 		 * interrupt callback, an interrupt callback must finish
3176cc1bf307SJeff Guo 		 * before it can be unregistered when the device is being
3177cc1bf307SJeff Guo 		 * detached. So return from this callback quickly and detach
3178cc1bf307SJeff Guo 		 * the device through a deferred removal instead. This is a
3179cc1bf307SJeff Guo 		 * workaround; once device detaching is moved into the EAL
3180cc1bf307SJeff Guo 		 * in the future, the deferred removal can be deleted.
3181cc1bf307SJeff Guo 		 */
3182cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3183cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3184cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3185cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3186fb73e096SJeff Guo 		break;
3187fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3188fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3189fb73e096SJeff Guo 			device_name);
3190fb73e096SJeff Guo 		/* TODO: After kernel driver binding is finished,
3191fb73e096SJeff Guo 		 * begin to attach the port.
3192fb73e096SJeff Guo */ 3193fb73e096SJeff Guo break; 3194fb73e096SJeff Guo default: 3195fb73e096SJeff Guo break; 3196fb73e096SJeff Guo } 3197fb73e096SJeff Guo } 3198fb73e096SJeff Guo 3199013af9b6SIntel static int 320028caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 3201af75078fSIntel { 3202013af9b6SIntel uint16_t i; 3203af75078fSIntel int diag; 3204013af9b6SIntel uint8_t mapping_found = 0; 3205af75078fSIntel 3206013af9b6SIntel for (i = 0; i < nb_tx_queue_stats_mappings; i++) { 3207013af9b6SIntel if ((tx_queue_stats_mappings[i].port_id == port_id) && 3208013af9b6SIntel (tx_queue_stats_mappings[i].queue_id < nb_txq )) { 3209013af9b6SIntel diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 3210013af9b6SIntel tx_queue_stats_mappings[i].queue_id, 3211013af9b6SIntel tx_queue_stats_mappings[i].stats_counter_id); 3212013af9b6SIntel if (diag != 0) 3213013af9b6SIntel return diag; 3214013af9b6SIntel mapping_found = 1; 3215af75078fSIntel } 3216013af9b6SIntel } 3217013af9b6SIntel if (mapping_found) 3218013af9b6SIntel port->tx_queue_stats_mapping_enabled = 1; 3219013af9b6SIntel return 0; 3220013af9b6SIntel } 3221013af9b6SIntel 3222013af9b6SIntel static int 322328caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) 3224013af9b6SIntel { 3225013af9b6SIntel uint16_t i; 3226013af9b6SIntel int diag; 3227013af9b6SIntel uint8_t mapping_found = 0; 3228013af9b6SIntel 3229013af9b6SIntel for (i = 0; i < nb_rx_queue_stats_mappings; i++) { 3230013af9b6SIntel if ((rx_queue_stats_mappings[i].port_id == port_id) && 3231013af9b6SIntel (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { 3232013af9b6SIntel diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 3233013af9b6SIntel rx_queue_stats_mappings[i].queue_id, 3234013af9b6SIntel rx_queue_stats_mappings[i].stats_counter_id); 3235013af9b6SIntel if (diag != 0) 3236013af9b6SIntel return diag; 3237013af9b6SIntel mapping_found = 1; 3238013af9b6SIntel } 3239013af9b6SIntel } 3240013af9b6SIntel if (mapping_found) 3241013af9b6SIntel port->rx_queue_stats_mapping_enabled = 1; 3242013af9b6SIntel return 0; 3243013af9b6SIntel } 3244013af9b6SIntel 3245013af9b6SIntel static void 324628caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) 3247013af9b6SIntel { 3248013af9b6SIntel int diag = 0; 3249013af9b6SIntel 3250013af9b6SIntel diag = set_tx_queue_stats_mapping_registers(pi, port); 3251af75078fSIntel if (diag != 0) { 3252013af9b6SIntel if (diag == -ENOTSUP) { 3253013af9b6SIntel port->tx_queue_stats_mapping_enabled = 0; 3254013af9b6SIntel printf("TX queue stats mapping not supported port id=%d\n", pi); 3255013af9b6SIntel } 3256013af9b6SIntel else 3257013af9b6SIntel rte_exit(EXIT_FAILURE, 3258013af9b6SIntel "set_tx_queue_stats_mapping_registers " 3259013af9b6SIntel "failed for port id=%d diag=%d\n", 3260af75078fSIntel pi, diag); 3261af75078fSIntel } 3262013af9b6SIntel 3263013af9b6SIntel diag = set_rx_queue_stats_mapping_registers(pi, port); 3264af75078fSIntel if (diag != 0) { 3265013af9b6SIntel if (diag == -ENOTSUP) { 3266013af9b6SIntel port->rx_queue_stats_mapping_enabled = 0; 3267013af9b6SIntel printf("RX queue stats mapping not supported port id=%d\n", pi); 3268013af9b6SIntel } 3269013af9b6SIntel else 3270013af9b6SIntel rte_exit(EXIT_FAILURE, 3271013af9b6SIntel "set_rx_queue_stats_mapping_registers " 3272013af9b6SIntel "failed for port id=%d diag=%d\n", 3273af75078fSIntel pi, diag); 3274af75078fSIntel } 3275af75078fSIntel } 3276af75078fSIntel 
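/*
 * Illustrative sketch (an addition for clarity; it is not called anywhere
 * in testpmd): the mapping helpers above boil down to per-queue calls like
 * the one below, which binds one queue to one of the device's limited
 * per-queue stats counter slots. The queue and counter ids are arbitrary
 * example values.
 */
static __rte_unused int
example_map_one_rx_queue(portid_t port_id)
{
	/* Expose the stats of Rx queue 0 through per-queue counter 0. */
	return rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
}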
3277f2c5125aSPablo de Lara static void 3278f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port) 3279f2c5125aSPablo de Lara { 3280d44f8a48SQi Zhang uint16_t qid; 32815e91aeefSWei Zhao uint64_t offloads; 3282f2c5125aSPablo de Lara 3283d44f8a48SQi Zhang for (qid = 0; qid < nb_rxq; qid++) { 32845e91aeefSWei Zhao offloads = port->rx_conf[qid].offloads; 3285d44f8a48SQi Zhang port->rx_conf[qid] = port->dev_info.default_rxconf; 3286575e0fd1SWei Zhao if (offloads != 0) 3287575e0fd1SWei Zhao port->rx_conf[qid].offloads = offloads; 3288d44f8a48SQi Zhang 3289d44f8a48SQi Zhang /* Check if any Rx parameters have been passed */ 3290f2c5125aSPablo de Lara if (rx_pthresh != RTE_PMD_PARAM_UNSET) 3291d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; 3292f2c5125aSPablo de Lara 3293f2c5125aSPablo de Lara if (rx_hthresh != RTE_PMD_PARAM_UNSET) 3294d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; 3295f2c5125aSPablo de Lara 3296f2c5125aSPablo de Lara if (rx_wthresh != RTE_PMD_PARAM_UNSET) 3297d44f8a48SQi Zhang port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; 3298f2c5125aSPablo de Lara 3299f2c5125aSPablo de Lara if (rx_free_thresh != RTE_PMD_PARAM_UNSET) 3300d44f8a48SQi Zhang port->rx_conf[qid].rx_free_thresh = rx_free_thresh; 3301f2c5125aSPablo de Lara 3302f2c5125aSPablo de Lara if (rx_drop_en != RTE_PMD_PARAM_UNSET) 3303d44f8a48SQi Zhang port->rx_conf[qid].rx_drop_en = rx_drop_en; 3304f2c5125aSPablo de Lara 3305d44f8a48SQi Zhang port->nb_rx_desc[qid] = nb_rxd; 3306d44f8a48SQi Zhang } 3307d44f8a48SQi Zhang 3308d44f8a48SQi Zhang for (qid = 0; qid < nb_txq; qid++) { 33095e91aeefSWei Zhao offloads = port->tx_conf[qid].offloads; 3310d44f8a48SQi Zhang port->tx_conf[qid] = port->dev_info.default_txconf; 3311575e0fd1SWei Zhao if (offloads != 0) 3312575e0fd1SWei Zhao port->tx_conf[qid].offloads = offloads; 3313d44f8a48SQi Zhang 3314d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 3315f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 3316d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 3317f2c5125aSPablo de Lara 3318f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 3319d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 3320f2c5125aSPablo de Lara 3321f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 3322d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 3323f2c5125aSPablo de Lara 3324f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 3325d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 3326f2c5125aSPablo de Lara 3327f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 3328d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 3329d44f8a48SQi Zhang 3330d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 3331d44f8a48SQi Zhang } 3332f2c5125aSPablo de Lara } 3333f2c5125aSPablo de Lara 3334013af9b6SIntel void 3335013af9b6SIntel init_port_config(void) 3336013af9b6SIntel { 3337013af9b6SIntel portid_t pid; 3338013af9b6SIntel struct rte_port *port; 33396f51deb9SIvan Ilchenko int ret; 3340013af9b6SIntel 33417d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 3342013af9b6SIntel port = &ports[pid]; 3343013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 33446f51deb9SIvan Ilchenko 33456f51deb9SIvan Ilchenko ret = eth_dev_info_get_print_err(pid, &port->dev_info); 33466f51deb9SIvan Ilchenko if (ret != 0) 33476f51deb9SIvan Ilchenko return; 33486f51deb9SIvan Ilchenko 33493ce690d3SBruce Richardson if (nb_rxq > 1) { 
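			/*
			 * Added note: with several Rx queues, RSS is enabled
			 * below, but the requested hash types (rss_hf) are
			 * masked with what the device reports as supported
			 * (dev_info.flow_type_rss_offloads). For example,
			 * requesting ETH_RSS_IP | ETH_RSS_UDP on a device
			 * that only supports ETH_RSS_IP leaves just
			 * ETH_RSS_IP set.
			 */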
3350013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 335190892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 3352422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 3353af75078fSIntel } else { 3354013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 3355013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 3356af75078fSIntel } 33573ce690d3SBruce Richardson 33585f592039SJingjing Wu if (port->dcb_flag == 0) { 33593ce690d3SBruce Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 3360f9295aa2SXiaoyu Min port->dev_conf.rxmode.mq_mode = 3361f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3362f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_RSS); 33633ce690d3SBruce Richardson else 33643ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 33653ce690d3SBruce Richardson } 33663ce690d3SBruce Richardson 3367f2c5125aSPablo de Lara rxtx_port_config(port); 3368013af9b6SIntel 3369a5279d25SIgor Romanov ret = eth_macaddr_get_print_err(pid, &port->eth_addr); 3370a5279d25SIgor Romanov if (ret != 0) 3371a5279d25SIgor Romanov return; 3372013af9b6SIntel 3373013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 337450c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 3375e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 33767b7e5ba7SIntel #endif 33778ea656f8SGaetan Rivet 33788ea656f8SGaetan Rivet if (lsc_interrupt && 33798ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 33808ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 33818ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 3382284c908cSGaetan Rivet if (rmv_interrupt && 3383284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 3384284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 3385284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 3386013af9b6SIntel } 3387013af9b6SIntel } 3388013af9b6SIntel 338941b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 339041b05095SBernard Iremonger { 339141b05095SBernard Iremonger struct rte_port *port; 339241b05095SBernard Iremonger 339341b05095SBernard Iremonger port = &ports[slave_pid]; 339441b05095SBernard Iremonger port->slave_flag = 1; 339541b05095SBernard Iremonger } 339641b05095SBernard Iremonger 339741b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 339841b05095SBernard Iremonger { 339941b05095SBernard Iremonger struct rte_port *port; 340041b05095SBernard Iremonger 340141b05095SBernard Iremonger port = &ports[slave_pid]; 340241b05095SBernard Iremonger port->slave_flag = 0; 340341b05095SBernard Iremonger } 340441b05095SBernard Iremonger 34050e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 34060e545d30SBernard Iremonger { 34070e545d30SBernard Iremonger struct rte_port *port; 34080e545d30SBernard Iremonger 34090e545d30SBernard Iremonger port = &ports[slave_pid]; 3410b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 3411b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 3412b8b8b344SMatan Azrad return 1; 3413b8b8b344SMatan Azrad return 0; 34140e545d30SBernard Iremonger } 34150e545d30SBernard Iremonger 3416013af9b6SIntel const uint16_t vlan_tags[] = { 3417013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 3418013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 3419013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 3420013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 3421013af9b6SIntel }; 3422013af9b6SIntel 3423013af9b6SIntel static int 3424ac7c491cSKonstantin 
Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 34251a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 34261a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 34271a572499SJingjing Wu uint8_t pfc_en) 3428013af9b6SIntel { 3429013af9b6SIntel uint8_t i; 3430ac7c491cSKonstantin Ananyev int32_t rc; 3431ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 3432af75078fSIntel 3433af75078fSIntel /* 3434013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 3435013af9b6SIntel * given above, and the number of traffic classes available for use. 3436af75078fSIntel */ 34371a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 34381a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 34391a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 34401a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 34411a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 3442013af9b6SIntel 3443547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 34441a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 34451a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 34461a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 34471a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 34481a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 34491a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 3450013af9b6SIntel 34511a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 34521a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 34531a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 34541a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 34551a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 3456af75078fSIntel } 3457013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3458f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 3459f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 3460013af9b6SIntel } 3461013af9b6SIntel 3462013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 3463f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 3464f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3465f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB); 346632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 34671a572499SJingjing Wu } else { 34681a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 34691a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 34701a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 34711a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 3472013af9b6SIntel 34735139bc12STing Xu memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); 34745139bc12STing Xu 3475ac7c491cSKonstantin Ananyev rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 3476ac7c491cSKonstantin Ananyev if (rc != 0) 3477ac7c491cSKonstantin Ananyev return rc; 3478ac7c491cSKonstantin Ananyev 34791a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 34801a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 34811a572499SJingjing Wu 3482bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3483bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 3484bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 3485013af9b6SIntel } 3486ac7c491cSKonstantin Ananyev 3487f9295aa2SXiaoyu Min eth_conf->rxmode.mq_mode = 3488f9295aa2SXiaoyu Min (enum rte_eth_rx_mq_mode) 3489f9295aa2SXiaoyu Min (rx_mq_mode & ETH_MQ_RX_DCB_RSS); 3490ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = 
rss_conf; 349132e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 34921a572499SJingjing Wu } 34931a572499SJingjing Wu 34941a572499SJingjing Wu if (pfc_en) 34951a572499SJingjing Wu eth_conf->dcb_capability_en = 34961a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 3497013af9b6SIntel else 3498013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 3499013af9b6SIntel 3500013af9b6SIntel return 0; 3501013af9b6SIntel } 3502013af9b6SIntel 3503013af9b6SIntel int 35041a572499SJingjing Wu init_port_dcb_config(portid_t pid, 35051a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 35061a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 35071a572499SJingjing Wu uint8_t pfc_en) 3508013af9b6SIntel { 3509013af9b6SIntel struct rte_eth_conf port_conf; 3510013af9b6SIntel struct rte_port *rte_port; 3511013af9b6SIntel int retval; 3512013af9b6SIntel uint16_t i; 3513013af9b6SIntel 35142a977b89SWenzhuo Lu rte_port = &ports[pid]; 3515013af9b6SIntel 3516013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 3517013af9b6SIntel /* Enter DCB configuration status */ 3518013af9b6SIntel dcb_config = 1; 3519013af9b6SIntel 3520d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 3521d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 3522d5354e89SYanglong Wu 3523013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 3524ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 3525013af9b6SIntel if (retval < 0) 3526013af9b6SIntel return retval; 35270074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3528013af9b6SIntel 35292f203d44SQi Zhang /* re-configure the device . */ 35302b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 35312b0e0ebaSChenbo Xia if (retval < 0) 35322b0e0ebaSChenbo Xia return retval; 35336f51deb9SIvan Ilchenko 35346f51deb9SIvan Ilchenko retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); 35356f51deb9SIvan Ilchenko if (retval != 0) 35366f51deb9SIvan Ilchenko return retval; 35372a977b89SWenzhuo Lu 35382a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 35392a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 
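	 * testpmd's fixed queue layout assumes pool queues begin at queue 0,
	 * which is why DCB_VT_ENABLED is rejected just below in that case.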
35402a977b89SWenzhuo Lu */ 35412a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 35422a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 35432a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 35442a977b89SWenzhuo Lu " for port %d.", pid); 35452a977b89SWenzhuo Lu return -1; 35462a977b89SWenzhuo Lu } 35472a977b89SWenzhuo Lu 35482a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 35492a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 35502a977b89SWenzhuo Lu */ 35512a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 355286ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 355386ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 355486ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 355586ef65eeSBernard Iremonger } else { 35562a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 35572a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 355886ef65eeSBernard Iremonger } 35592a977b89SWenzhuo Lu } else { 35602a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 35612a977b89SWenzhuo Lu if (rte_port->dev_info.vmdq_pool_base == 0) { 35622a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 35632a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 35642a977b89SWenzhuo Lu } else { 35652a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 35662a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 35672a977b89SWenzhuo Lu 35682a977b89SWenzhuo Lu } 35692a977b89SWenzhuo Lu } 35702a977b89SWenzhuo Lu rx_free_thresh = 64; 35712a977b89SWenzhuo Lu 3572013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 3573013af9b6SIntel 3574f2c5125aSPablo de Lara rxtx_port_config(rte_port); 3575013af9b6SIntel /* VLAN filter */ 35760074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 35771a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 3578013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 3579013af9b6SIntel 3580a5279d25SIgor Romanov retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); 3581a5279d25SIgor Romanov if (retval != 0) 3582a5279d25SIgor Romanov return retval; 3583a5279d25SIgor Romanov 3584013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 3585013af9b6SIntel 35867741e4cfSIntel rte_port->dcb_flag = 1; 35877741e4cfSIntel 3588013af9b6SIntel return 0; 3589af75078fSIntel } 3590af75078fSIntel 3591ffc468ffSTetsuya Mukawa static void 3592ffc468ffSTetsuya Mukawa init_port(void) 3593ffc468ffSTetsuya Mukawa { 3594ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
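	 * All RTE_MAX_ETHPORTS slots are allocated up front so that ports
	 * hot-plugged later (see attach_port) need no reallocation.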
*/ 3595ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 3596ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 3597ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 3598ffc468ffSTetsuya Mukawa if (ports == NULL) { 3599ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 3600ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 3601ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 3602ffc468ffSTetsuya Mukawa } 360329841336SPhil Yang 360429841336SPhil Yang /* Initialize ports NUMA structures */ 360529841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 360629841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 360729841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 3608ffc468ffSTetsuya Mukawa } 3609ffc468ffSTetsuya Mukawa 3610d3a274ceSZhihong Wang static void 3611d3a274ceSZhihong Wang force_quit(void) 3612d3a274ceSZhihong Wang { 3613d3a274ceSZhihong Wang pmd_test_exit(); 3614d3a274ceSZhihong Wang prompt_exit(); 3615d3a274ceSZhihong Wang } 3616d3a274ceSZhihong Wang 3617d3a274ceSZhihong Wang static void 3618cfea1f30SPablo de Lara print_stats(void) 3619cfea1f30SPablo de Lara { 3620cfea1f30SPablo de Lara uint8_t i; 3621cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 3622cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 3623cfea1f30SPablo de Lara 3624cfea1f30SPablo de Lara /* Clear screen and move to top left */ 3625cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 3626cfea1f30SPablo de Lara 3627cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 3628cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 3629cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 3630683d1e82SIgor Romanov 3631683d1e82SIgor Romanov fflush(stdout); 3632cfea1f30SPablo de Lara } 3633cfea1f30SPablo de Lara 3634cfea1f30SPablo de Lara static void 3635d3a274ceSZhihong Wang signal_handler(int signum) 3636d3a274ceSZhihong Wang { 3637d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 3638d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 3639d3a274ceSZhihong Wang signum); 3640102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 3641102b7329SReshma Pattan /* uninitialize packet capture framework */ 3642102b7329SReshma Pattan rte_pdump_uninit(); 3643102b7329SReshma Pattan #endif 364462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 36458b36297dSAmit Gupta if (latencystats_enabled != 0) 364662d3216dSReshma Pattan rte_latencystats_uninit(); 364762d3216dSReshma Pattan #endif 3648d3a274ceSZhihong Wang force_quit(); 3649d9a191a0SPhil Yang /* Set flag to indicate the force termination. 
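		 * It is polled by the stats-period loop in main() below.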
*/ 3650d9a191a0SPhil Yang f_quit = 1; 3651d3a274ceSZhihong Wang /* exit with the expected status */ 3652d3a274ceSZhihong Wang signal(signum, SIG_DFL); 3653d3a274ceSZhihong Wang kill(getpid(), signum); 3654d3a274ceSZhihong Wang } 3655d3a274ceSZhihong Wang } 3656d3a274ceSZhihong Wang 3657af75078fSIntel int 3658af75078fSIntel main(int argc, char** argv) 3659af75078fSIntel { 3660af75078fSIntel int diag; 3661f8244c63SZhiyong Yang portid_t port_id; 36624918a357SXiaoyun Li uint16_t count; 3663fb73e096SJeff Guo int ret; 3664af75078fSIntel 3665d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 3666d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 3667d3a274ceSZhihong Wang 3668285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 3669285fd101SOlivier Matz if (testpmd_logtype < 0) 367016267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register log type"); 3671285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 3672285fd101SOlivier Matz 36739201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 36749201806eSStephen Hemminger if (diag < 0) 367516267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", 367616267ceeSStephen Hemminger rte_strerror(rte_errno)); 36779201806eSStephen Hemminger 3678a87ab9f7SStephen Hemminger if (rte_eal_process_type() == RTE_PROC_SECONDARY) 367916267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, 368016267ceeSStephen Hemminger "Secondary process type not supported.\n"); 3681a87ab9f7SStephen Hemminger 368297b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 368397b5d8b5SThomas Monjalon if (ret != 0) 368416267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); 368597b5d8b5SThomas Monjalon 36864aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP 36874aa0d012SAnatoly Burakov /* initialize packet capture framework */ 3688e9436f54STiwei Bie rte_pdump_init(); 36894aa0d012SAnatoly Burakov #endif 36904aa0d012SAnatoly Burakov 36914918a357SXiaoyun Li count = 0; 36924918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 36934918a357SXiaoyun Li ports_ids[count] = port_id; 36944918a357SXiaoyun Li count++; 36954918a357SXiaoyun Li } 36964918a357SXiaoyun Li nb_ports = (portid_t) count; 36974aa0d012SAnatoly Burakov if (nb_ports == 0) 36984aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 36994aa0d012SAnatoly Burakov 37004aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 37014aa0d012SAnatoly Burakov init_port(); 37024aa0d012SAnatoly Burakov 37034aa0d012SAnatoly Burakov set_def_fwd_config(); 37044aa0d012SAnatoly Burakov if (nb_lcores == 0) 370516267ceeSStephen Hemminger rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" 370616267ceeSStephen Hemminger "Check the core mask argument\n"); 37074aa0d012SAnatoly Burakov 3708e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 3709e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE 3710e505d84cSAnatoly Burakov bitrate_enabled = 0; 3711e505d84cSAnatoly Burakov #endif 3712e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS 3713e505d84cSAnatoly Burakov latencystats_enabled = 0; 3714e505d84cSAnatoly Burakov #endif 3715e505d84cSAnatoly Burakov 3716fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 37175fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 3718fb7b8b32SAnatoly Burakov do_mlockall = 0; 3719fb7b8b32SAnatoly Burakov #else 3720fb7b8b32SAnatoly Burakov do_mlockall = 1; 3721fb7b8b32SAnatoly Burakov #endif 3722fb7b8b32SAnatoly Burakov 
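	/*
	 * Added note: rte_eal_init() returned (in diag above) how many argv
	 * entries the EAL consumed, so skip past them and parse only the
	 * application arguments, i.e. everything after the "--" separator in
	 * an invocation such as this example command line:
	 *
	 *   testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
	 */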
3723e505d84cSAnatoly Burakov 	argc -= diag;
3724e505d84cSAnatoly Burakov 	argv += diag;
3725e505d84cSAnatoly Burakov 	if (argc > 1)
3726e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3727e505d84cSAnatoly Burakov 
3728e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3729285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
37301c036b16SEelco Chaudron 			strerror(errno));
37311c036b16SEelco Chaudron 	}
37321c036b16SEelco Chaudron 
373399cabef0SPablo de Lara 	if (tx_first && interactive)
373499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
373599cabef0SPablo de Lara 			"interactive mode.\n");
37368820cba4SDavid Hunt 
37378820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
37388820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
37398820cba4SDavid Hunt 			"using tx_first. Disabling.\n");
37408820cba4SDavid Hunt 		lsc_interrupt = 0;
37418820cba4SDavid Hunt 	}
37428820cba4SDavid Hunt 
37435a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
37445a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
37455a8fb55cSReshma Pattan 
37465a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3747af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3748af75078fSIntel 			"but nb_txq=%d will prevent it from being fully tested.\n",
3749af75078fSIntel 			nb_rxq, nb_txq);
3750af75078fSIntel 
3751af75078fSIntel 	init_config();
3752fb73e096SJeff Guo 
3753fb73e096SJeff Guo 	if (hot_plug) {
37542049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3755fb73e096SJeff Guo 		if (ret) {
37562049c511SJeff Guo 			RTE_LOG(ERR, EAL,
37572049c511SJeff Guo 				"failed to enable hotplug handling.\n");
3758fb73e096SJeff Guo 			return -1;
3759fb73e096SJeff Guo 		}
3760fb73e096SJeff Guo 
37612049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
37622049c511SJeff Guo 		if (ret) {
37632049c511SJeff Guo 			RTE_LOG(ERR, EAL,
37642049c511SJeff Guo 				"failed to start device event monitoring.\n");
37652049c511SJeff Guo 			return -1;
37662049c511SJeff Guo 		}
37672049c511SJeff Guo 
37682049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3769cc1bf307SJeff Guo 			dev_event_callback, NULL);
37702049c511SJeff Guo 		if (ret) {
37712049c511SJeff Guo 			RTE_LOG(ERR, EAL,
37722049c511SJeff Guo 				"failed to register device event callback\n");
37732049c511SJeff Guo 			return -1;
37742049c511SJeff Guo 		}
3775fb73e096SJeff Guo 	}
3776fb73e096SJeff Guo 
37776937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3778148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3779af75078fSIntel 
3780ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
378134fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
378234fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
378334fc1051SIvan Ilchenko 		if (ret != 0)
378434fc1051SIvan Ilchenko 			printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
378534fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
378634fc1051SIvan Ilchenko 	}
3787af75078fSIntel 
37887e4441c8SRemy Horton 	/* Init metrics library */
37897e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
37907e4441c8SRemy Horton 
379162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
379262d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
379362d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
379462d3216dSReshma Pattan 		if (ret)
379562d3216dSReshma Pattan 			printf("Warning: latencystats init()"
379662d3216dSReshma Pattan 				" returned error %d\n", ret);
379762d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
379862d3216dSReshma Pattan 			latencystats_lcore_id);
379962d3216dSReshma Pattan 	}
380062d3216dSReshma Pattan #endif
380162d3216dSReshma Pattan 
38027e4441c8SRemy Horton 	/* Setup bitrate stats */
38037e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3804e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
38057e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
38067e4441c8SRemy Horton 		if (bitrate_data == NULL)
3807e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3808e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
38097e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3810e25e6c70SRemy Horton 	}
38117e4441c8SRemy Horton #endif
38127e4441c8SRemy Horton 
38130d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
381481ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
381581ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
381681ef862bSAllain Legacy 
3817ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3818ca7feb22SCyril Chemparathy 		if (auto_start) {
3819ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3820ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3821ca7feb22SCyril Chemparathy 		}
3822af75078fSIntel 		prompt();
38230de738cfSJiayu Hu 		pmd_test_exit();
3824ca7feb22SCyril Chemparathy 	} else
38250d56cb81SThomas Monjalon #endif
38260d56cb81SThomas Monjalon 	{
3827af75078fSIntel 		char c;
3828af75078fSIntel 		int rc;
3829af75078fSIntel 
3830d9a191a0SPhil Yang 		f_quit = 0;
3831d9a191a0SPhil Yang 
3832af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
383399cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
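		/*
		 * The block below converts stats_period (given in seconds on
		 * the command line) into TSC cycles once, then polls. As a
		 * worked example with an assumed 2.0 GHz timer frequency:
		 * stats_period = 5 gives
		 *
		 *	timer_period = 5 * rte_get_timer_hz()
		 *	             = 5 * 2000000000 = 10^10 cycles,
		 *
		 * so print_stats() is re-run roughly every 5 seconds of
		 * wall-clock time.
		 */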
3834cfea1f30SPablo de Lara 		if (stats_period != 0) {
3835cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3836cfea1f30SPablo de Lara 			uint64_t timer_period;
3837cfea1f30SPablo de Lara 
3838cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3839cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3840cfea1f30SPablo de Lara 
3841d9a191a0SPhil Yang 			while (f_quit == 0) {
3842cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3843cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3844cfea1f30SPablo de Lara 
3845cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3846cfea1f30SPablo de Lara 					print_stats();
3847cfea1f30SPablo de Lara 					/* Reset the timer */
3848cfea1f30SPablo de Lara 					diff_time = 0;
3849cfea1f30SPablo de Lara 				}
3850cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3851cfea1f30SPablo de Lara 				prev_time = cur_time;
3852cfea1f30SPablo de Lara 				sleep(1);
3853cfea1f30SPablo de Lara 			}
3854cfea1f30SPablo de Lara 		}
3855cfea1f30SPablo de Lara 
3856af75078fSIntel 		printf("Press enter to exit\n");
3857af75078fSIntel 		rc = read(0, &c, 1);
3858d3a274ceSZhihong Wang 		pmd_test_exit();
3859af75078fSIntel 		if (rc < 0)
3860af75078fSIntel 			return 1;
3861af75078fSIntel 	}
3862af75078fSIntel 
38635e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
38645e516c89SStephen Hemminger 	if (ret != 0)
38655e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
38665e516c89SStephen Hemminger 			"EAL cleanup failed: %s\n", strerror(-ret));
38675e516c89SStephen Hemminger 
38685e516c89SStephen Hemminger 	return EXIT_SUCCESS;
3869af75078fSIntel }
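/*
 * rte_eal_cleanup() above is intended to release the resources (hugepage
 * memory in particular) that rte_eal_init() acquired. Reduced to a sketch,
 * the overall lifecycle main() implements is the standard DPDK one
 * (illustrative code, not part of testpmd):
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		int n = rte_eal_init(argc, argv);
 *
 *		if (n < 0)
 *			rte_exit(EXIT_FAILURE, "Cannot init EAL\n");
 *		argc -= n;
 *		argv += n;
 *		// ... parse app options, configure ports, forward ...
 *		if (rte_eal_cleanup() != 0)
 *			return EXIT_FAILURE;
 *		return EXIT_SUCCESS;
 *	}
 */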