1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2174a1631SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3af75078fSIntel */ 4af75078fSIntel 5af75078fSIntel #include <stdarg.h> 6af75078fSIntel #include <stdio.h> 7af75078fSIntel #include <stdlib.h> 8af75078fSIntel #include <signal.h> 9af75078fSIntel #include <string.h> 10af75078fSIntel #include <time.h> 11af75078fSIntel #include <fcntl.h> 121c036b16SEelco Chaudron #include <sys/mman.h> 13af75078fSIntel #include <sys/types.h> 14af75078fSIntel #include <errno.h> 15fb73e096SJeff Guo #include <stdbool.h> 16af75078fSIntel 17af75078fSIntel #include <sys/queue.h> 18af75078fSIntel #include <sys/stat.h> 19af75078fSIntel 20af75078fSIntel #include <stdint.h> 21af75078fSIntel #include <unistd.h> 22af75078fSIntel #include <inttypes.h> 23af75078fSIntel 24af75078fSIntel #include <rte_common.h> 25d1eb542eSOlivier Matz #include <rte_errno.h> 26af75078fSIntel #include <rte_byteorder.h> 27af75078fSIntel #include <rte_log.h> 28af75078fSIntel #include <rte_debug.h> 29af75078fSIntel #include <rte_cycles.h> 30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h> 31af75078fSIntel #include <rte_memory.h> 32af75078fSIntel #include <rte_memcpy.h> 33af75078fSIntel #include <rte_launch.h> 34af75078fSIntel #include <rte_eal.h> 35284c908cSGaetan Rivet #include <rte_alarm.h> 36af75078fSIntel #include <rte_per_lcore.h> 37af75078fSIntel #include <rte_lcore.h> 38af75078fSIntel #include <rte_atomic.h> 39af75078fSIntel #include <rte_branch_prediction.h> 40af75078fSIntel #include <rte_mempool.h> 41af75078fSIntel #include <rte_malloc.h> 42af75078fSIntel #include <rte_mbuf.h> 430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h> 44af75078fSIntel #include <rte_interrupts.h> 45af75078fSIntel #include <rte_pci.h> 46af75078fSIntel #include <rte_ether.h> 47af75078fSIntel #include <rte_ethdev.h> 48edab33b1STetsuya Mukawa #include <rte_dev.h> 49af75078fSIntel #include <rte_string_fns.h> 50e261265eSRadu Nicolau #ifdef 
RTE_LIBRTE_IXGBE_PMD 51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h> 52e261265eSRadu Nicolau #endif 53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 54102b7329SReshma Pattan #include <rte_pdump.h> 55102b7329SReshma Pattan #endif 56938a184aSAdrien Mazarguil #include <rte_flow.h> 577e4441c8SRemy Horton #include <rte_metrics.h> 587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 597e4441c8SRemy Horton #include <rte_bitrate.h> 607e4441c8SRemy Horton #endif 6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 6262d3216dSReshma Pattan #include <rte_latencystats.h> 6362d3216dSReshma Pattan #endif 64af75078fSIntel 65af75078fSIntel #include "testpmd.h" 66af75078fSIntel 67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB 68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ 69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000) 70c7f5dba7SAnatoly Burakov #else 71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB 72c7f5dba7SAnatoly Burakov #endif 73c7f5dba7SAnatoly Burakov 74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT 75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */ 76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26) 77c7f5dba7SAnatoly Burakov #else 78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT 79c7f5dba7SAnatoly Burakov #endif 80c7f5dba7SAnatoly Burakov 81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem" 82c7f5dba7SAnatoly Burakov 83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */ 84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */ 85af75078fSIntel 86af75078fSIntel /* use master core for command line ? */ 87af75078fSIntel uint8_t interactive = 0; 88ca7feb22SCyril Chemparathy uint8_t auto_start = 0; 8999cabef0SPablo de Lara uint8_t tx_first; 9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0}; 91af75078fSIntel 92af75078fSIntel /* 93af75078fSIntel * NUMA support configuration. 
94af75078fSIntel * When set, the NUMA support attempts to dispatch the allocation of the 95af75078fSIntel * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the 96af75078fSIntel * probed ports among the CPU sockets 0 and 1. 97af75078fSIntel * Otherwise, all memory is allocated from CPU socket 0. 98af75078fSIntel */ 99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */ 100af75078fSIntel 101af75078fSIntel /* 102b6ea6408SIntel * In UMA mode,all memory is allocated from socket 0 if --socket-num is 103b6ea6408SIntel * not configured. 104b6ea6408SIntel */ 105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG; 106b6ea6408SIntel 107b6ea6408SIntel /* 108c7f5dba7SAnatoly Burakov * Select mempool allocation type: 109c7f5dba7SAnatoly Burakov * - native: use regular DPDK memory 110c7f5dba7SAnatoly Burakov * - anon: use regular DPDK memory to create mempool, but populate using 111c7f5dba7SAnatoly Burakov * anonymous memory (may not be IOVA-contiguous) 112c7f5dba7SAnatoly Burakov * - xmem: use externally allocated hugepage memory 113148f963fSBruce Richardson */ 114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE; 115148f963fSBruce Richardson 116148f963fSBruce Richardson /* 11763531389SGeorgios Katsikas * Store specified sockets on which memory pool to be used by ports 11863531389SGeorgios Katsikas * is allocated. 11963531389SGeorgios Katsikas */ 12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS]; 12163531389SGeorgios Katsikas 12263531389SGeorgios Katsikas /* 12363531389SGeorgios Katsikas * Store specified sockets on which RX ring to be used by ports 12463531389SGeorgios Katsikas * is allocated. 12563531389SGeorgios Katsikas */ 12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 12763531389SGeorgios Katsikas 12863531389SGeorgios Katsikas /* 12963531389SGeorgios Katsikas * Store specified sockets on which TX ring to be used by ports 13063531389SGeorgios Katsikas * is allocated. 
13163531389SGeorgios Katsikas */ 13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS]; 13363531389SGeorgios Katsikas 13463531389SGeorgios Katsikas /* 135af75078fSIntel * Record the Ethernet address of peer target ports to which packets are 136af75078fSIntel * forwarded. 137547d946cSNirmoy Das * Must be instantiated with the ethernet addresses of peer traffic generator 138af75078fSIntel * ports. 139af75078fSIntel */ 1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 141af75078fSIntel portid_t nb_peer_eth_addrs = 0; 142af75078fSIntel 143af75078fSIntel /* 144af75078fSIntel * Probed Target Environment. 145af75078fSIntel */ 146af75078fSIntel struct rte_port *ports; /**< For all probed ethernet ports. */ 147af75078fSIntel portid_t nb_ports; /**< Number of probed ethernet ports. */ 148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ 149af75078fSIntel lcoreid_t nb_lcores; /**< Number of probed logical cores. */ 150af75078fSIntel 1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ 1524918a357SXiaoyun Li 153af75078fSIntel /* 154af75078fSIntel * Test Forwarding Configuration. 155af75078fSIntel * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 156af75078fSIntel * nb_fwd_ports <= nb_cfg_ports <= nb_ports 157af75078fSIntel */ 158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 160af75078fSIntel portid_t nb_cfg_ports; /**< Number of configured ports. */ 161af75078fSIntel portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 162af75078fSIntel 163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ 164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ 165af75078fSIntel 166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. 
*/ 167af75078fSIntel streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ 168af75078fSIntel 169af75078fSIntel /* 170af75078fSIntel * Forwarding engines. 171af75078fSIntel */ 172af75078fSIntel struct fwd_engine * fwd_engines[] = { 173af75078fSIntel &io_fwd_engine, 174af75078fSIntel &mac_fwd_engine, 175d47388f1SCyril Chemparathy &mac_swap_engine, 176e9e23a61SCyril Chemparathy &flow_gen_engine, 177af75078fSIntel &rx_only_engine, 178af75078fSIntel &tx_only_engine, 179af75078fSIntel &csum_fwd_engine, 180168dfa61SIvan Boule &icmp_echo_engine, 1813c156061SJens Freimann &noisy_vnf_engine, 1820ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC 1830ad778b3SJasvinder Singh &softnic_fwd_engine, 1845b590fbeSJasvinder Singh #endif 185af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588 186af75078fSIntel &ieee1588_fwd_engine, 187af75078fSIntel #endif 188af75078fSIntel NULL, 189af75078fSIntel }; 190af75078fSIntel 191401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES]; 19259fcf854SShahaf Shuler uint16_t mempool_flags; 193401b744dSShahaf Shuler 194af75078fSIntel struct fwd_config cur_fwd_config; 195af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ 196bf56fce1SZhihong Wang uint32_t retry_enabled; 197bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; 198bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES; 199af75078fSIntel 200af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ 201c8798818SIntel uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if 202c8798818SIntel * specified on command-line. */ 203cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */ 204d9a191a0SPhil Yang 205d9a191a0SPhil Yang /* 206d9a191a0SPhil Yang * In container, it cannot terminate the process which running with 'stats-period' 207d9a191a0SPhil Yang * option. 
Set flag to exit stats period loop after received SIGINT/SIGTERM. 208d9a191a0SPhil Yang */ 209d9a191a0SPhil Yang uint8_t f_quit; 210d9a191a0SPhil Yang 211af75078fSIntel /* 212af75078fSIntel * Configuration of packet segments used by the "txonly" processing engine. 213af75078fSIntel */ 214af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */ 215af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { 216af75078fSIntel TXONLY_DEF_PACKET_LEN, 217af75078fSIntel }; 218af75078fSIntel uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ 219af75078fSIntel 22079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; 22179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */ 22279bec05bSKonstantin Ananyev 22382010ef5SYongseok Koh uint8_t txonly_multi_flow; 22482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */ 22582010ef5SYongseok Koh 226af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ 227e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ 228af75078fSIntel 229900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */ 230900550deSIntel uint8_t dcb_config = 0; 231900550deSIntel 232900550deSIntel /* Whether the dcb is in testing status */ 233900550deSIntel uint8_t dcb_test = 0; 234900550deSIntel 235af75078fSIntel /* 236af75078fSIntel * Configurable number of RX/TX queues. 237af75078fSIntel */ 238af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ 239af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */ 240af75078fSIntel 241af75078fSIntel /* 242af75078fSIntel * Configurable number of RX/TX ring descriptors. 2438599ed31SRemy Horton * Defaults are supplied by drivers via ethdev. 
244af75078fSIntel */ 2458599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0 2468599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0 247af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ 248af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ 249af75078fSIntel 250f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1 251af75078fSIntel /* 252af75078fSIntel * Configurable values of RX and TX ring threshold registers. 253af75078fSIntel */ 254af75078fSIntel 255f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; 256f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; 257f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; 258af75078fSIntel 259f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; 260f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; 261f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; 262af75078fSIntel 263af75078fSIntel /* 264af75078fSIntel * Configurable value of RX free threshold. 265af75078fSIntel */ 266f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; 267af75078fSIntel 268af75078fSIntel /* 269ce8d5614SIntel * Configurable value of RX drop enable. 270ce8d5614SIntel */ 271f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; 272ce8d5614SIntel 273ce8d5614SIntel /* 274af75078fSIntel * Configurable value of TX free threshold. 275af75078fSIntel */ 276f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; 277af75078fSIntel 278af75078fSIntel /* 279af75078fSIntel * Configurable value of TX RS bit threshold. 280af75078fSIntel */ 281f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; 282af75078fSIntel 283af75078fSIntel /* 2843c156061SJens Freimann * Configurable value of buffered packets before sending. 
2853c156061SJens Freimann */ 2863c156061SJens Freimann uint16_t noisy_tx_sw_bufsz; 2873c156061SJens Freimann 2883c156061SJens Freimann /* 2893c156061SJens Freimann * Configurable value of packet buffer timeout. 2903c156061SJens Freimann */ 2913c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time; 2923c156061SJens Freimann 2933c156061SJens Freimann /* 2943c156061SJens Freimann * Configurable value for size of VNF internal memory area 2953c156061SJens Freimann * used for simulating noisy neighbour behaviour 2963c156061SJens Freimann */ 2973c156061SJens Freimann uint64_t noisy_lkup_mem_sz; 2983c156061SJens Freimann 2993c156061SJens Freimann /* 3003c156061SJens Freimann * Configurable value of number of random writes done in 3013c156061SJens Freimann * VNF simulation memory area. 3023c156061SJens Freimann */ 3033c156061SJens Freimann uint64_t noisy_lkup_num_writes; 3043c156061SJens Freimann 3053c156061SJens Freimann /* 3063c156061SJens Freimann * Configurable value of number of random reads done in 3073c156061SJens Freimann * VNF simulation memory area. 3083c156061SJens Freimann */ 3093c156061SJens Freimann uint64_t noisy_lkup_num_reads; 3103c156061SJens Freimann 3113c156061SJens Freimann /* 3123c156061SJens Freimann * Configurable value of number of random reads/writes done in 3133c156061SJens Freimann * VNF simulation memory area. 3143c156061SJens Freimann */ 3153c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes; 3163c156061SJens Freimann 3173c156061SJens Freimann /* 318af75078fSIntel * Receive Side Scaling (RSS) configuration. 319af75078fSIntel */ 3208a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ 321af75078fSIntel 322af75078fSIntel /* 323af75078fSIntel * Port topology configuration 324af75078fSIntel */ 325af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ 326af75078fSIntel 3277741e4cfSIntel /* 3287741e4cfSIntel * Avoids to flush all the RX streams before starts forwarding. 
3297741e4cfSIntel */ 3307741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */ 3317741e4cfSIntel 332af75078fSIntel /* 3337ee3e944SVasily Philipov * Flow API isolated mode. 3347ee3e944SVasily Philipov */ 3357ee3e944SVasily Philipov uint8_t flow_isolate_all; 3367ee3e944SVasily Philipov 3377ee3e944SVasily Philipov /* 338bc202406SDavid Marchand * Avoids to check link status when starting/stopping a port. 339bc202406SDavid Marchand */ 340bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */ 341bc202406SDavid Marchand 342bc202406SDavid Marchand /* 3436937d210SStephen Hemminger * Don't automatically start all ports in interactive mode. 3446937d210SStephen Hemminger */ 3456937d210SStephen Hemminger uint8_t no_device_start = 0; 3466937d210SStephen Hemminger 3476937d210SStephen Hemminger /* 3488ea656f8SGaetan Rivet * Enable link status change notification 3498ea656f8SGaetan Rivet */ 3508ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */ 3518ea656f8SGaetan Rivet 3528ea656f8SGaetan Rivet /* 353284c908cSGaetan Rivet * Enable device removal notification. 354284c908cSGaetan Rivet */ 355284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */ 356284c908cSGaetan Rivet 357fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. 
*/ 358fb73e096SJeff Guo 3594f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */ 3604f1ed78eSThomas Monjalon bool setup_on_probe_event = true; 3614f1ed78eSThomas Monjalon 36297b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */ 36397b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = { 36497b5d8b5SThomas Monjalon [RTE_ETH_EVENT_UNKNOWN] = "unknown", 36597b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_LSC] = "link state change", 36697b5d8b5SThomas Monjalon [RTE_ETH_EVENT_QUEUE_STATE] = "queue state", 36797b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RESET] = "reset", 36897b5d8b5SThomas Monjalon [RTE_ETH_EVENT_VF_MBOX] = "VF mbox", 36997b5d8b5SThomas Monjalon [RTE_ETH_EVENT_IPSEC] = "IPsec", 37097b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MACSEC] = "MACsec", 37197b5d8b5SThomas Monjalon [RTE_ETH_EVENT_INTR_RMV] = "device removal", 37297b5d8b5SThomas Monjalon [RTE_ETH_EVENT_NEW] = "device probed", 37397b5d8b5SThomas Monjalon [RTE_ETH_EVENT_DESTROY] = "device released", 37497b5d8b5SThomas Monjalon [RTE_ETH_EVENT_MAX] = NULL, 37597b5d8b5SThomas Monjalon }; 37697b5d8b5SThomas Monjalon 377284c908cSGaetan Rivet /* 3783af72783SGaetan Rivet * Display or mask ether events 3793af72783SGaetan Rivet * Default to all events except VF_MBOX 3803af72783SGaetan Rivet */ 3813af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | 3823af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | 3833af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | 3843af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | 385badb87c1SAnoob Joseph (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | 3863af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | 3873af72783SGaetan Rivet (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); 388e505d84cSAnatoly Burakov /* 389e505d84cSAnatoly Burakov * Decide if all memory are locked for performance. 
390e505d84cSAnatoly Burakov */ 391e505d84cSAnatoly Burakov int do_mlockall = 0; 3923af72783SGaetan Rivet 3933af72783SGaetan Rivet /* 3947b7e5ba7SIntel * NIC bypass mode configuration options. 3957b7e5ba7SIntel */ 3967b7e5ba7SIntel 39750c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 3987b7e5ba7SIntel /* The NIC bypass watchdog timeout. */ 399e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; 4007b7e5ba7SIntel #endif 4017b7e5ba7SIntel 402e261265eSRadu Nicolau 40362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 40462d3216dSReshma Pattan 40562d3216dSReshma Pattan /* 40662d3216dSReshma Pattan * Set when latency stats is enabled in the commandline 40762d3216dSReshma Pattan */ 40862d3216dSReshma Pattan uint8_t latencystats_enabled; 40962d3216dSReshma Pattan 41062d3216dSReshma Pattan /* 41162d3216dSReshma Pattan * Lcore ID to serive latency statistics. 41262d3216dSReshma Pattan */ 41362d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1; 41462d3216dSReshma Pattan 41562d3216dSReshma Pattan #endif 41662d3216dSReshma Pattan 4177b7e5ba7SIntel /* 418af75078fSIntel * Ethernet device configuration. 419af75078fSIntel */ 420af75078fSIntel struct rte_eth_rxmode rx_mode = { 42135b2d13fSOlivier Matz .max_rx_pkt_len = RTE_ETHER_MAX_LEN, 42235b2d13fSOlivier Matz /**< Default maximum frame length. 
*/ 423af75078fSIntel }; 424af75078fSIntel 42507e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = { 42607e5f7bdSShahaf Shuler .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, 42707e5f7bdSShahaf Shuler }; 428fd8c20aaSShahaf Shuler 429af75078fSIntel struct rte_fdir_conf fdir_conf = { 430af75078fSIntel .mode = RTE_FDIR_MODE_NONE, 431af75078fSIntel .pballoc = RTE_FDIR_PBALLOC_64K, 432af75078fSIntel .status = RTE_FDIR_REPORT_STATUS, 433d9d5e6f2SJingjing Wu .mask = { 43426f579aaSWei Zhao .vlan_tci_mask = 0xFFEF, 435d9d5e6f2SJingjing Wu .ipv4_mask = { 436d9d5e6f2SJingjing Wu .src_ip = 0xFFFFFFFF, 437d9d5e6f2SJingjing Wu .dst_ip = 0xFFFFFFFF, 438d9d5e6f2SJingjing Wu }, 439d9d5e6f2SJingjing Wu .ipv6_mask = { 440d9d5e6f2SJingjing Wu .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 441d9d5e6f2SJingjing Wu .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 442d9d5e6f2SJingjing Wu }, 443d9d5e6f2SJingjing Wu .src_port_mask = 0xFFFF, 444d9d5e6f2SJingjing Wu .dst_port_mask = 0xFFFF, 44547b3ac6bSWenzhuo Lu .mac_addr_byte_mask = 0xFF, 44647b3ac6bSWenzhuo Lu .tunnel_type_mask = 1, 44747b3ac6bSWenzhuo Lu .tunnel_id_mask = 0xFFFFFFFF, 448d9d5e6f2SJingjing Wu }, 449af75078fSIntel .drop_queue = 127, 450af75078fSIntel }; 451af75078fSIntel 4522950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ 453af75078fSIntel 454ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; 455ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; 456ed30d9b6SIntel 457ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; 458ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; 459ed30d9b6SIntel 460ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0; 461ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0; 462ed30d9b6SIntel 463a4fd5eeeSElza Mathew /* 464a4fd5eeeSElza Mathew * Display zero values by default for xstats 465a4fd5eeeSElza Mathew */ 466a4fd5eeeSElza Mathew uint8_t xstats_hide_zero; 467a4fd5eeeSElza Mathew 468c9cafcc8SShahaf Shuler unsigned int num_sockets = 0; 469c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES]; 4707acf894dSStephen Hurd 471e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE 4727e4441c8SRemy Horton /* Bitrate statistics */ 4737e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data; 474e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id; 475e25e6c70SRemy Horton uint8_t bitrate_enabled; 476e25e6c70SRemy Horton #endif 4777e4441c8SRemy Horton 478b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS]; 479b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; 480b40f8d78SJiayu Hu 4811960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = { 4821960be7dSNelio Laranjeiro .select_ipv4 = 1, 4831960be7dSNelio Laranjeiro .select_vlan = 0, 48462e8a5a8SViacheslav Ovsiienko .select_tos_ttl = 0, 4851960be7dSNelio Laranjeiro .vni = "\x00\x00\x00", 4861960be7dSNelio Laranjeiro .udp_src = 0, 4871960be7dSNelio Laranjeiro .udp_dst = RTE_BE16(4789), 4880c9da755SDavid Marchand .ipv4_src = RTE_IPV4(127, 0, 0, 1), 4890c9da755SDavid Marchand .ipv4_dst = RTE_IPV4(255, 255, 255, 255), 4901960be7dSNelio Laranjeiro .ipv6_src = 
"\x00\x00\x00\x00\x00\x00\x00\x00" 4911960be7dSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x00\x01", 4921960be7dSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 4931960be7dSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 4941960be7dSNelio Laranjeiro .vlan_tci = 0, 49562e8a5a8SViacheslav Ovsiienko .ip_tos = 0, 49662e8a5a8SViacheslav Ovsiienko .ip_ttl = 255, 4971960be7dSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 4981960be7dSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 4991960be7dSNelio Laranjeiro }; 5001960be7dSNelio Laranjeiro 501dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = { 502dcd962fcSNelio Laranjeiro .select_ipv4 = 1, 503dcd962fcSNelio Laranjeiro .select_vlan = 0, 504dcd962fcSNelio Laranjeiro .tni = "\x00\x00\x00", 5050c9da755SDavid Marchand .ipv4_src = RTE_IPV4(127, 0, 0, 1), 5060c9da755SDavid Marchand .ipv4_dst = RTE_IPV4(255, 255, 255, 255), 507dcd962fcSNelio Laranjeiro .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" 508dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x00\x01", 509dcd962fcSNelio Laranjeiro .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" 510dcd962fcSNelio Laranjeiro "\x00\x00\x00\x00\x00\x00\x11\x11", 511dcd962fcSNelio Laranjeiro .vlan_tci = 0, 512dcd962fcSNelio Laranjeiro .eth_src = "\x00\x00\x00\x00\x00\x00", 513dcd962fcSNelio Laranjeiro .eth_dst = "\xff\xff\xff\xff\xff\xff", 514dcd962fcSNelio Laranjeiro }; 515dcd962fcSNelio Laranjeiro 516ed30d9b6SIntel /* Forward function declarations */ 517c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi); 51828caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi, 51928caa76aSZhiyong Yang struct rte_port *port); 520edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask); 521f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id, 52276ad4a2dSGaetan Rivet enum rte_eth_event_type type, 523d6af1a13SBernard Iremonger void *param, void *ret_param); 
524cc1bf307SJeff Guo static void dev_event_callback(const char *device_name, 525fb73e096SJeff Guo enum rte_dev_event_type type, 526fb73e096SJeff Guo void *param); 527ce8d5614SIntel 528ce8d5614SIntel /* 529ce8d5614SIntel * Check if all the ports are started. 530ce8d5614SIntel * If yes, return positive value. If not, return zero. 531ce8d5614SIntel */ 532ce8d5614SIntel static int all_ports_started(void); 533ed30d9b6SIntel 53452f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS]; 53535b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; 53652f38a20SJiayu Hu 537af75078fSIntel /* 53898a7ea33SJerin Jacob * Helper function to check if socket is already discovered. 539c9cafcc8SShahaf Shuler * If yes, return positive value. If not, return zero. 540c9cafcc8SShahaf Shuler */ 541c9cafcc8SShahaf Shuler int 542c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id) 543c9cafcc8SShahaf Shuler { 544c9cafcc8SShahaf Shuler unsigned int i; 545c9cafcc8SShahaf Shuler 546c9cafcc8SShahaf Shuler for (i = 0; i < num_sockets; i++) { 547c9cafcc8SShahaf Shuler if (socket_ids[i] == socket_id) 548c9cafcc8SShahaf Shuler return 0; 549c9cafcc8SShahaf Shuler } 550c9cafcc8SShahaf Shuler return 1; 551c9cafcc8SShahaf Shuler } 552c9cafcc8SShahaf Shuler 553c9cafcc8SShahaf Shuler /* 554af75078fSIntel * Setup default configuration. 
555af75078fSIntel */ 556af75078fSIntel static void 557af75078fSIntel set_default_fwd_lcores_config(void) 558af75078fSIntel { 559af75078fSIntel unsigned int i; 560af75078fSIntel unsigned int nb_lc; 5617acf894dSStephen Hurd unsigned int sock_num; 562af75078fSIntel 563af75078fSIntel nb_lc = 0; 564af75078fSIntel for (i = 0; i < RTE_MAX_LCORE; i++) { 565dbfb8ec7SPhil Yang if (!rte_lcore_is_enabled(i)) 566dbfb8ec7SPhil Yang continue; 567c9cafcc8SShahaf Shuler sock_num = rte_lcore_to_socket_id(i); 568c9cafcc8SShahaf Shuler if (new_socket_id(sock_num)) { 569c9cafcc8SShahaf Shuler if (num_sockets >= RTE_MAX_NUMA_NODES) { 570c9cafcc8SShahaf Shuler rte_exit(EXIT_FAILURE, 571c9cafcc8SShahaf Shuler "Total sockets greater than %u\n", 572c9cafcc8SShahaf Shuler RTE_MAX_NUMA_NODES); 573c9cafcc8SShahaf Shuler } 574c9cafcc8SShahaf Shuler socket_ids[num_sockets++] = sock_num; 5757acf894dSStephen Hurd } 576f54fe5eeSStephen Hurd if (i == rte_get_master_lcore()) 577f54fe5eeSStephen Hurd continue; 578f54fe5eeSStephen Hurd fwd_lcores_cpuids[nb_lc++] = i; 579af75078fSIntel } 580af75078fSIntel nb_lcores = (lcoreid_t) nb_lc; 581af75078fSIntel nb_cfg_lcores = nb_lcores; 582af75078fSIntel nb_fwd_lcores = 1; 583af75078fSIntel } 584af75078fSIntel 585af75078fSIntel static void 586af75078fSIntel set_def_peer_eth_addrs(void) 587af75078fSIntel { 588af75078fSIntel portid_t i; 589af75078fSIntel 590af75078fSIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 59135b2d13fSOlivier Matz peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR; 592af75078fSIntel peer_eth_addrs[i].addr_bytes[5] = i; 593af75078fSIntel } 594af75078fSIntel } 595af75078fSIntel 596af75078fSIntel static void 597af75078fSIntel set_default_fwd_ports_config(void) 598af75078fSIntel { 599af75078fSIntel portid_t pt_id; 60065a7360cSMatan Azrad int i = 0; 601af75078fSIntel 602effdb8bbSPhil Yang RTE_ETH_FOREACH_DEV(pt_id) { 60365a7360cSMatan Azrad fwd_ports_ids[i++] = pt_id; 604af75078fSIntel 605effdb8bbSPhil Yang /* Update sockets info 
according to the attached device */ 606effdb8bbSPhil Yang int socket_id = rte_eth_dev_socket_id(pt_id); 607effdb8bbSPhil Yang if (socket_id >= 0 && new_socket_id(socket_id)) { 608effdb8bbSPhil Yang if (num_sockets >= RTE_MAX_NUMA_NODES) { 609effdb8bbSPhil Yang rte_exit(EXIT_FAILURE, 610effdb8bbSPhil Yang "Total sockets greater than %u\n", 611effdb8bbSPhil Yang RTE_MAX_NUMA_NODES); 612effdb8bbSPhil Yang } 613effdb8bbSPhil Yang socket_ids[num_sockets++] = socket_id; 614effdb8bbSPhil Yang } 615effdb8bbSPhil Yang } 616effdb8bbSPhil Yang 617af75078fSIntel nb_cfg_ports = nb_ports; 618af75078fSIntel nb_fwd_ports = nb_ports; 619af75078fSIntel } 620af75078fSIntel 621af75078fSIntel void 622af75078fSIntel set_def_fwd_config(void) 623af75078fSIntel { 624af75078fSIntel set_default_fwd_lcores_config(); 625af75078fSIntel set_def_peer_eth_addrs(); 626af75078fSIntel set_default_fwd_ports_config(); 627af75078fSIntel } 628af75078fSIntel 629c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */ 630c7f5dba7SAnatoly Burakov static int 631c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) 632c7f5dba7SAnatoly Burakov { 633c7f5dba7SAnatoly Burakov unsigned int n_pages, mbuf_per_pg, leftover; 634c7f5dba7SAnatoly Burakov uint64_t total_mem, mbuf_mem, obj_sz; 635c7f5dba7SAnatoly Burakov 636c7f5dba7SAnatoly Burakov /* there is no good way to predict how much space the mempool will 637c7f5dba7SAnatoly Burakov * occupy because it will allocate chunks on the fly, and some of those 638c7f5dba7SAnatoly Burakov * will come from default DPDK memory while some will come from our 639c7f5dba7SAnatoly Burakov * external memory, so just assume 128MB will be enough for everyone. 
640c7f5dba7SAnatoly Burakov */ 641c7f5dba7SAnatoly Burakov uint64_t hdr_mem = 128 << 20; 642c7f5dba7SAnatoly Burakov 643c7f5dba7SAnatoly Burakov /* account for possible non-contiguousness */ 644c7f5dba7SAnatoly Burakov obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); 645c7f5dba7SAnatoly Burakov if (obj_sz > pgsz) { 646c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); 647c7f5dba7SAnatoly Burakov return -1; 648c7f5dba7SAnatoly Burakov } 649c7f5dba7SAnatoly Burakov 650c7f5dba7SAnatoly Burakov mbuf_per_pg = pgsz / obj_sz; 651c7f5dba7SAnatoly Burakov leftover = (nb_mbufs % mbuf_per_pg) > 0; 652c7f5dba7SAnatoly Burakov n_pages = (nb_mbufs / mbuf_per_pg) + leftover; 653c7f5dba7SAnatoly Burakov 654c7f5dba7SAnatoly Burakov mbuf_mem = n_pages * pgsz; 655c7f5dba7SAnatoly Burakov 656c7f5dba7SAnatoly Burakov total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); 657c7f5dba7SAnatoly Burakov 658c7f5dba7SAnatoly Burakov if (total_mem > SIZE_MAX) { 659c7f5dba7SAnatoly Burakov TESTPMD_LOG(ERR, "Memory size too big\n"); 660c7f5dba7SAnatoly Burakov return -1; 661c7f5dba7SAnatoly Burakov } 662c7f5dba7SAnatoly Burakov *out = (size_t)total_mem; 663c7f5dba7SAnatoly Burakov 664c7f5dba7SAnatoly Burakov return 0; 665c7f5dba7SAnatoly Burakov } 666c7f5dba7SAnatoly Burakov 667c7f5dba7SAnatoly Burakov static int 668c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz) 669c7f5dba7SAnatoly Burakov { 670c7f5dba7SAnatoly Burakov /* as per mmap() manpage, all page sizes are log2 of page size 671c7f5dba7SAnatoly Burakov * shifted by MAP_HUGE_SHIFT 672c7f5dba7SAnatoly Burakov */ 6739d650537SAnatoly Burakov int log2 = rte_log2_u64(page_sz); 674c7f5dba7SAnatoly Burakov 675c7f5dba7SAnatoly Burakov return (log2 << HUGE_SHIFT); 676c7f5dba7SAnatoly Burakov } 677c7f5dba7SAnatoly Burakov 678c7f5dba7SAnatoly Burakov static void * 679c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge) 680c7f5dba7SAnatoly Burakov { 681c7f5dba7SAnatoly Burakov void *addr; 
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	/* fd = -1, offset = 0: anonymous mapping, no backing file */
	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

/* Description of one externally allocated memory area, filled in by
 * create_extmem() and consumed by setup_extmem().
 */
struct extmem_param {
	void *addr;		/* base virtual address of the mapping */
	size_t len;		/* total length in bytes */
	size_t pgsz;		/* page size used for the mapping */
	rte_iova_t *iova_table;	/* per-page IOVA addresses (malloc'd) */
	unsigned int iova_table_len; /* number of entries in iova_table */
};

/* Map an anonymous memory area big enough for nb_mbufs objects of mbuf_sz
 * bytes and build its per-page IOVA table. Tries each supported hugepage
 * size in turn; on success fills *param and returns 0, else returns -1.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big (only possible on 32-bit) */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		/* NOTE(review): mlock() return value is ignored — best-effort
		 * pinning; confirm failure here is acceptable.
		 */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			/* NOTE(review): result is not checked against
			 * RTE_BAD_IOVA — verify callers tolerate that.
			 */
			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success with this page size: stop trying others */
		break;
	}
	/* if we couldn't allocate anything (loop never hit 'break') */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

/* Create (if needed) the external heap and populate it with a memory area
 * sized for nb_mbufs objects. Returns 0 on success, -1 on failure.
 */
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id
< 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more: the heap keeps its own copy of the table */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

/* Mempool memory-chunk callback: DMA-unmap the chunk from every ethdev,
 * then un-register it from EAL. Failures are logged and otherwise ignored.
 */
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	/* unregister only after every device has been unmapped */
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

/* Mempool memory-chunk callback: register the chunk with EAL and DMA-map it
 * for every ethdev. If registration fails the chunk is left unmapped.
 */
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	/* NULL IOVA table: let EAL discover the addresses itself */
	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}

/*
 * Configuration initialisation done once at init time.
 */
/* Create one mbuf pool on the given socket according to the global
 * mp_alloc_type policy; exits the program on failure (see err: path below).
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* anonymous-memory pool: create empty, then populate
			 * from anonymous mappings and DMA-map each chunk.
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size,
				heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* reached both by normal fall-through after the switch and by the
	 * goto on MP_ALLOC_ANON failure paths
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}

/*
 * Check given socket id is valid or not with NUMA mode,
 * if valid, return 0, else return -1
 */
static int
check_socket_id(const unsigned int socket_id)
{
	/* print the NUMA-configuration warning at most once per run */
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
10083f7311baSWei Dai * *pid return the port id which has minimal value of 10093f7311baSWei Dai * max_rx_queues in all ports. 10103f7311baSWei Dai */ 10113f7311baSWei Dai queueid_t 10123f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid) 10133f7311baSWei Dai { 10143f7311baSWei Dai queueid_t allowed_max_rxq = MAX_QUEUE_ID; 10153f7311baSWei Dai portid_t pi; 10163f7311baSWei Dai struct rte_eth_dev_info dev_info; 10173f7311baSWei Dai 10183f7311baSWei Dai RTE_ETH_FOREACH_DEV(pi) { 10193f7311baSWei Dai rte_eth_dev_info_get(pi, &dev_info); 10203f7311baSWei Dai if (dev_info.max_rx_queues < allowed_max_rxq) { 10213f7311baSWei Dai allowed_max_rxq = dev_info.max_rx_queues; 10223f7311baSWei Dai *pid = pi; 10233f7311baSWei Dai } 10243f7311baSWei Dai } 10253f7311baSWei Dai return allowed_max_rxq; 10263f7311baSWei Dai } 10273f7311baSWei Dai 10283f7311baSWei Dai /* 10293f7311baSWei Dai * Check input rxq is valid or not. 10303f7311baSWei Dai * If input rxq is not greater than any of maximum number 10313f7311baSWei Dai * of RX queues of all ports, it is valid. 10323f7311baSWei Dai * if valid, return 0, else return -1 10333f7311baSWei Dai */ 10343f7311baSWei Dai int 10353f7311baSWei Dai check_nb_rxq(queueid_t rxq) 10363f7311baSWei Dai { 10373f7311baSWei Dai queueid_t allowed_max_rxq; 10383f7311baSWei Dai portid_t pid = 0; 10393f7311baSWei Dai 10403f7311baSWei Dai allowed_max_rxq = get_allowed_max_nb_rxq(&pid); 10413f7311baSWei Dai if (rxq > allowed_max_rxq) { 10423f7311baSWei Dai printf("Fail: input rxq (%u) can't be greater " 10433f7311baSWei Dai "than max_rx_queues (%u) of port %u\n", 10443f7311baSWei Dai rxq, 10453f7311baSWei Dai allowed_max_rxq, 10463f7311baSWei Dai pid); 10473f7311baSWei Dai return -1; 10483f7311baSWei Dai } 10493f7311baSWei Dai return 0; 10503f7311baSWei Dai } 10513f7311baSWei Dai 105236db4f6cSWei Dai /* 105336db4f6cSWei Dai * Get the allowed maximum number of TX queues. 
105436db4f6cSWei Dai * *pid return the port id which has minimal value of 105536db4f6cSWei Dai * max_tx_queues in all ports. 105636db4f6cSWei Dai */ 105736db4f6cSWei Dai queueid_t 105836db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid) 105936db4f6cSWei Dai { 106036db4f6cSWei Dai queueid_t allowed_max_txq = MAX_QUEUE_ID; 106136db4f6cSWei Dai portid_t pi; 106236db4f6cSWei Dai struct rte_eth_dev_info dev_info; 106336db4f6cSWei Dai 106436db4f6cSWei Dai RTE_ETH_FOREACH_DEV(pi) { 106536db4f6cSWei Dai rte_eth_dev_info_get(pi, &dev_info); 106636db4f6cSWei Dai if (dev_info.max_tx_queues < allowed_max_txq) { 106736db4f6cSWei Dai allowed_max_txq = dev_info.max_tx_queues; 106836db4f6cSWei Dai *pid = pi; 106936db4f6cSWei Dai } 107036db4f6cSWei Dai } 107136db4f6cSWei Dai return allowed_max_txq; 107236db4f6cSWei Dai } 107336db4f6cSWei Dai 107436db4f6cSWei Dai /* 107536db4f6cSWei Dai * Check input txq is valid or not. 107636db4f6cSWei Dai * If input txq is not greater than any of maximum number 107736db4f6cSWei Dai * of TX queues of all ports, it is valid. 
 * if valid, return 0, else return -1
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

/* One-time global configuration: allocate per-lcore state, apply default
 * per-port Rx/Tx configuration, create mbuf pools, set up forwarding
 * streams, GSO and GRO contexts. Exits the program on any failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores.
 */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
			sizeof(struct fwd_lcore *) * nb_lcores,
			RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* strip offloads the device does not advertise */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
			DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id =
					rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* one pool per discovered socket */
		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
				(mbuf_data_size,
				 nb_mbuf_per_pool,
				 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool if none on this socket */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}


/* Re-read device info for a (possibly hot-plugged) port and flag it for
 * reconfiguration on the given socket.
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports.
 */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


/* (Re)allocate the forwarding-stream array: validate per-port queue counts,
 * assign per-port socket ids, then size the array as nb_ports * max(rxq, txq).
 * Returns 0 on success (including "nothing changed"), -1 on invalid config.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* nothing to do when the stream count is unchanged */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
1415af75078fSIntel */ 1416af75078fSIntel total_burst = 0; 1417af75078fSIntel burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; 1418af75078fSIntel pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; 1419af75078fSIntel for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { 1420af75078fSIntel nb_burst = pbs->pkt_burst_spread[nb_pkt]; 1421af75078fSIntel if (nb_burst == 0) 1422af75078fSIntel continue; 1423af75078fSIntel total_burst += nb_burst; 1424af75078fSIntel if (nb_burst > burst_stats[0]) { 1425af75078fSIntel burst_stats[1] = burst_stats[0]; 1426af75078fSIntel pktnb_stats[1] = pktnb_stats[0]; 1427af75078fSIntel burst_stats[0] = nb_burst; 1428af75078fSIntel pktnb_stats[0] = nb_pkt; 1429fe613657SDaniel Shelepov } else if (nb_burst > burst_stats[1]) { 1430fe613657SDaniel Shelepov burst_stats[1] = nb_burst; 1431fe613657SDaniel Shelepov pktnb_stats[1] = nb_pkt; 1432af75078fSIntel } 1433af75078fSIntel } 1434af75078fSIntel if (total_burst == 0) 1435af75078fSIntel return; 1436af75078fSIntel burst_percent[0] = (burst_stats[0] * 100) / total_burst; 1437af75078fSIntel printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, 1438af75078fSIntel burst_percent[0], (int) pktnb_stats[0]); 1439af75078fSIntel if (burst_stats[0] == total_burst) { 1440af75078fSIntel printf("]\n"); 1441af75078fSIntel return; 1442af75078fSIntel } 1443af75078fSIntel if (burst_stats[0] + burst_stats[1] == total_burst) { 1444af75078fSIntel printf(" + %d%% of %d pkts]\n", 1445af75078fSIntel 100 - burst_percent[0], pktnb_stats[1]); 1446af75078fSIntel return; 1447af75078fSIntel } 1448af75078fSIntel burst_percent[1] = (burst_stats[1] * 100) / total_burst; 1449af75078fSIntel burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); 1450af75078fSIntel if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { 1451af75078fSIntel printf(" + %d%% of others]\n", 100 - burst_percent[0]); 1452af75078fSIntel return; 1453af75078fSIntel } 1454af75078fSIntel printf(" + %d%% of %d pkts + %d%% of others]\n", 
1455af75078fSIntel burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); 1456af75078fSIntel } 1457af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ 1458af75078fSIntel 1459af75078fSIntel static void 1460af75078fSIntel fwd_stream_stats_display(streamid_t stream_id) 1461af75078fSIntel { 1462af75078fSIntel struct fwd_stream *fs; 1463af75078fSIntel static const char *fwd_top_stats_border = "-------"; 1464af75078fSIntel 1465af75078fSIntel fs = fwd_streams[stream_id]; 1466af75078fSIntel if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && 1467af75078fSIntel (fs->fwd_dropped == 0)) 1468af75078fSIntel return; 1469af75078fSIntel printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " 1470af75078fSIntel "TX Port=%2d/Queue=%2d %s\n", 1471af75078fSIntel fwd_top_stats_border, fs->rx_port, fs->rx_queue, 1472af75078fSIntel fs->tx_port, fs->tx_queue, fwd_top_stats_border); 1473c185d42cSDavid Marchand printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 1474c185d42cSDavid Marchand " TX-dropped: %-14"PRIu64, 1475af75078fSIntel fs->rx_packets, fs->tx_packets, fs->fwd_dropped); 1476af75078fSIntel 1477af75078fSIntel /* if checksum mode */ 1478af75078fSIntel if (cur_fwd_eng == &csum_fwd_engine) { 1479c185d42cSDavid Marchand printf(" RX- bad IP checksum: %-14"PRIu64 1480c185d42cSDavid Marchand " Rx- bad L4 checksum: %-14"PRIu64 1481c185d42cSDavid Marchand " Rx- bad outer L4 checksum: %-14"PRIu64"\n", 148258d475b7SJerin Jacob fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, 148358d475b7SJerin Jacob fs->rx_bad_outer_l4_csum); 148494d65546SDavid Marchand } else { 148594d65546SDavid Marchand printf("\n"); 1486af75078fSIntel } 1487af75078fSIntel 1488af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 1489af75078fSIntel pkt_burst_stats_display("RX", &fs->rx_burst_stats); 1490af75078fSIntel pkt_burst_stats_display("TX", &fs->tx_burst_stats); 1491af75078fSIntel #endif 1492af75078fSIntel } 1493af75078fSIntel 149453324971SDavid Marchand void 149553324971SDavid Marchand 
fwd_stats_display(void) 149653324971SDavid Marchand { 149753324971SDavid Marchand static const char *fwd_stats_border = "----------------------"; 149853324971SDavid Marchand static const char *acc_stats_border = "+++++++++++++++"; 149953324971SDavid Marchand struct { 150053324971SDavid Marchand struct fwd_stream *rx_stream; 150153324971SDavid Marchand struct fwd_stream *tx_stream; 150253324971SDavid Marchand uint64_t tx_dropped; 150353324971SDavid Marchand uint64_t rx_bad_ip_csum; 150453324971SDavid Marchand uint64_t rx_bad_l4_csum; 150553324971SDavid Marchand uint64_t rx_bad_outer_l4_csum; 150653324971SDavid Marchand } ports_stats[RTE_MAX_ETHPORTS]; 150753324971SDavid Marchand uint64_t total_rx_dropped = 0; 150853324971SDavid Marchand uint64_t total_tx_dropped = 0; 150953324971SDavid Marchand uint64_t total_rx_nombuf = 0; 151053324971SDavid Marchand struct rte_eth_stats stats; 151153324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 151253324971SDavid Marchand uint64_t fwd_cycles = 0; 151353324971SDavid Marchand #endif 151453324971SDavid Marchand uint64_t total_recv = 0; 151553324971SDavid Marchand uint64_t total_xmit = 0; 151653324971SDavid Marchand struct rte_port *port; 151753324971SDavid Marchand streamid_t sm_id; 151853324971SDavid Marchand portid_t pt_id; 151953324971SDavid Marchand int i; 152053324971SDavid Marchand 152153324971SDavid Marchand memset(ports_stats, 0, sizeof(ports_stats)); 152253324971SDavid Marchand 152353324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 152453324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 152553324971SDavid Marchand 152653324971SDavid Marchand if (cur_fwd_config.nb_fwd_streams > 152753324971SDavid Marchand cur_fwd_config.nb_fwd_ports) { 152853324971SDavid Marchand fwd_stream_stats_display(sm_id); 152953324971SDavid Marchand } else { 153053324971SDavid Marchand ports_stats[fs->tx_port].tx_stream = fs; 153153324971SDavid Marchand 
ports_stats[fs->rx_port].rx_stream = fs; 153253324971SDavid Marchand } 153353324971SDavid Marchand 153453324971SDavid Marchand ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; 153553324971SDavid Marchand 153653324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; 153753324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; 153853324971SDavid Marchand ports_stats[fs->rx_port].rx_bad_outer_l4_csum += 153953324971SDavid Marchand fs->rx_bad_outer_l4_csum; 154053324971SDavid Marchand 154153324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 154253324971SDavid Marchand fwd_cycles += fs->core_cycles; 154353324971SDavid Marchand #endif 154453324971SDavid Marchand } 154553324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 154653324971SDavid Marchand uint8_t j; 154753324971SDavid Marchand 154853324971SDavid Marchand pt_id = fwd_ports_ids[i]; 154953324971SDavid Marchand port = &ports[pt_id]; 155053324971SDavid Marchand 155153324971SDavid Marchand rte_eth_stats_get(pt_id, &stats); 155253324971SDavid Marchand stats.ipackets -= port->stats.ipackets; 155353324971SDavid Marchand stats.opackets -= port->stats.opackets; 155453324971SDavid Marchand stats.ibytes -= port->stats.ibytes; 155553324971SDavid Marchand stats.obytes -= port->stats.obytes; 155653324971SDavid Marchand stats.imissed -= port->stats.imissed; 155753324971SDavid Marchand stats.oerrors -= port->stats.oerrors; 155853324971SDavid Marchand stats.rx_nombuf -= port->stats.rx_nombuf; 155953324971SDavid Marchand 156053324971SDavid Marchand total_recv += stats.ipackets; 156153324971SDavid Marchand total_xmit += stats.opackets; 156253324971SDavid Marchand total_rx_dropped += stats.imissed; 156353324971SDavid Marchand total_tx_dropped += ports_stats[pt_id].tx_dropped; 156453324971SDavid Marchand total_tx_dropped += stats.oerrors; 156553324971SDavid Marchand total_rx_nombuf += stats.rx_nombuf; 156653324971SDavid Marchand 
156753324971SDavid Marchand printf("\n %s Forward statistics for port %-2d %s\n", 156853324971SDavid Marchand fwd_stats_border, pt_id, fwd_stats_border); 156953324971SDavid Marchand 157053324971SDavid Marchand if (!port->rx_queue_stats_mapping_enabled && 157153324971SDavid Marchand !port->tx_queue_stats_mapping_enabled) { 157253324971SDavid Marchand printf(" RX-packets: %-14"PRIu64 157353324971SDavid Marchand " RX-dropped: %-14"PRIu64 157453324971SDavid Marchand "RX-total: %-"PRIu64"\n", 157553324971SDavid Marchand stats.ipackets, stats.imissed, 157653324971SDavid Marchand stats.ipackets + stats.imissed); 157753324971SDavid Marchand 157853324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 157953324971SDavid Marchand printf(" Bad-ipcsum: %-14"PRIu64 158053324971SDavid Marchand " Bad-l4csum: %-14"PRIu64 158153324971SDavid Marchand "Bad-outer-l4csum: %-14"PRIu64"\n", 158253324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 158353324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 158453324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 158553324971SDavid Marchand if (stats.ierrors + stats.rx_nombuf > 0) { 158653324971SDavid Marchand printf(" RX-error: %-"PRIu64"\n", 158753324971SDavid Marchand stats.ierrors); 158853324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", 158953324971SDavid Marchand stats.rx_nombuf); 159053324971SDavid Marchand } 159153324971SDavid Marchand 159253324971SDavid Marchand printf(" TX-packets: %-14"PRIu64 159353324971SDavid Marchand " TX-dropped: %-14"PRIu64 159453324971SDavid Marchand "TX-total: %-"PRIu64"\n", 159553324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 159653324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 159753324971SDavid Marchand } else { 159853324971SDavid Marchand printf(" RX-packets: %14"PRIu64 159953324971SDavid Marchand " RX-dropped:%14"PRIu64 160053324971SDavid Marchand " RX-total:%14"PRIu64"\n", 160153324971SDavid Marchand stats.ipackets, 
stats.imissed, 160253324971SDavid Marchand stats.ipackets + stats.imissed); 160353324971SDavid Marchand 160453324971SDavid Marchand if (cur_fwd_eng == &csum_fwd_engine) 160553324971SDavid Marchand printf(" Bad-ipcsum:%14"PRIu64 160653324971SDavid Marchand " Bad-l4csum:%14"PRIu64 160753324971SDavid Marchand " Bad-outer-l4csum: %-14"PRIu64"\n", 160853324971SDavid Marchand ports_stats[pt_id].rx_bad_ip_csum, 160953324971SDavid Marchand ports_stats[pt_id].rx_bad_l4_csum, 161053324971SDavid Marchand ports_stats[pt_id].rx_bad_outer_l4_csum); 161153324971SDavid Marchand if ((stats.ierrors + stats.rx_nombuf) > 0) { 161253324971SDavid Marchand printf(" RX-error:%"PRIu64"\n", stats.ierrors); 161353324971SDavid Marchand printf(" RX-nombufs: %14"PRIu64"\n", 161453324971SDavid Marchand stats.rx_nombuf); 161553324971SDavid Marchand } 161653324971SDavid Marchand 161753324971SDavid Marchand printf(" TX-packets: %14"PRIu64 161853324971SDavid Marchand " TX-dropped:%14"PRIu64 161953324971SDavid Marchand " TX-total:%14"PRIu64"\n", 162053324971SDavid Marchand stats.opackets, ports_stats[pt_id].tx_dropped, 162153324971SDavid Marchand stats.opackets + ports_stats[pt_id].tx_dropped); 162253324971SDavid Marchand } 162353324971SDavid Marchand 162453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 162553324971SDavid Marchand if (ports_stats[pt_id].rx_stream) 162653324971SDavid Marchand pkt_burst_stats_display("RX", 162753324971SDavid Marchand &ports_stats[pt_id].rx_stream->rx_burst_stats); 162853324971SDavid Marchand if (ports_stats[pt_id].tx_stream) 162953324971SDavid Marchand pkt_burst_stats_display("TX", 163053324971SDavid Marchand &ports_stats[pt_id].tx_stream->tx_burst_stats); 163153324971SDavid Marchand #endif 163253324971SDavid Marchand 163353324971SDavid Marchand if (port->rx_queue_stats_mapping_enabled) { 163453324971SDavid Marchand printf("\n"); 163553324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 163653324971SDavid Marchand printf(" Stats reg 
%2d RX-packets:%14"PRIu64 163753324971SDavid Marchand " RX-errors:%14"PRIu64 163853324971SDavid Marchand " RX-bytes:%14"PRIu64"\n", 163953324971SDavid Marchand j, stats.q_ipackets[j], 164053324971SDavid Marchand stats.q_errors[j], stats.q_ibytes[j]); 164153324971SDavid Marchand } 164253324971SDavid Marchand printf("\n"); 164353324971SDavid Marchand } 164453324971SDavid Marchand if (port->tx_queue_stats_mapping_enabled) { 164553324971SDavid Marchand for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { 164653324971SDavid Marchand printf(" Stats reg %2d TX-packets:%14"PRIu64 164753324971SDavid Marchand " TX-bytes:%14" 164853324971SDavid Marchand PRIu64"\n", 164953324971SDavid Marchand j, stats.q_opackets[j], 165053324971SDavid Marchand stats.q_obytes[j]); 165153324971SDavid Marchand } 165253324971SDavid Marchand } 165353324971SDavid Marchand 165453324971SDavid Marchand printf(" %s--------------------------------%s\n", 165553324971SDavid Marchand fwd_stats_border, fwd_stats_border); 165653324971SDavid Marchand } 165753324971SDavid Marchand 165853324971SDavid Marchand printf("\n %s Accumulated forward statistics for all ports" 165953324971SDavid Marchand "%s\n", 166053324971SDavid Marchand acc_stats_border, acc_stats_border); 166153324971SDavid Marchand printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " 166253324971SDavid Marchand "%-"PRIu64"\n" 166353324971SDavid Marchand " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " 166453324971SDavid Marchand "%-"PRIu64"\n", 166553324971SDavid Marchand total_recv, total_rx_dropped, total_recv + total_rx_dropped, 166653324971SDavid Marchand total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); 166753324971SDavid Marchand if (total_rx_nombuf > 0) 166853324971SDavid Marchand printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); 166953324971SDavid Marchand printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" 167053324971SDavid Marchand "%s\n", 167153324971SDavid Marchand 
acc_stats_border, acc_stats_border); 167253324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 167353324971SDavid Marchand if (total_recv > 0) 167453324971SDavid Marchand printf("\n CPU cycles/packet=%u (total cycles=" 167553324971SDavid Marchand "%"PRIu64" / total RX packets=%"PRIu64")\n", 167653324971SDavid Marchand (unsigned int)(fwd_cycles / total_recv), 167753324971SDavid Marchand fwd_cycles, total_recv); 167853324971SDavid Marchand #endif 167953324971SDavid Marchand } 168053324971SDavid Marchand 168153324971SDavid Marchand void 168253324971SDavid Marchand fwd_stats_reset(void) 168353324971SDavid Marchand { 168453324971SDavid Marchand streamid_t sm_id; 168553324971SDavid Marchand portid_t pt_id; 168653324971SDavid Marchand int i; 168753324971SDavid Marchand 168853324971SDavid Marchand for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 168953324971SDavid Marchand pt_id = fwd_ports_ids[i]; 169053324971SDavid Marchand rte_eth_stats_get(pt_id, &ports[pt_id].stats); 169153324971SDavid Marchand } 169253324971SDavid Marchand for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 169353324971SDavid Marchand struct fwd_stream *fs = fwd_streams[sm_id]; 169453324971SDavid Marchand 169553324971SDavid Marchand fs->rx_packets = 0; 169653324971SDavid Marchand fs->tx_packets = 0; 169753324971SDavid Marchand fs->fwd_dropped = 0; 169853324971SDavid Marchand fs->rx_bad_ip_csum = 0; 169953324971SDavid Marchand fs->rx_bad_l4_csum = 0; 170053324971SDavid Marchand fs->rx_bad_outer_l4_csum = 0; 170153324971SDavid Marchand 170253324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 170353324971SDavid Marchand memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); 170453324971SDavid Marchand memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); 170553324971SDavid Marchand #endif 170653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 170753324971SDavid Marchand fs->core_cycles = 0; 170853324971SDavid Marchand #endif 170953324971SDavid 
Marchand } 171053324971SDavid Marchand } 171153324971SDavid Marchand 1712af75078fSIntel static void 17137741e4cfSIntel flush_fwd_rx_queues(void) 1714af75078fSIntel { 1715af75078fSIntel struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1716af75078fSIntel portid_t rxp; 17177741e4cfSIntel portid_t port_id; 1718af75078fSIntel queueid_t rxq; 1719af75078fSIntel uint16_t nb_rx; 1720af75078fSIntel uint16_t i; 1721af75078fSIntel uint8_t j; 1722f487715fSReshma Pattan uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 1723594302c7SJames Poole uint64_t timer_period; 1724f487715fSReshma Pattan 1725f487715fSReshma Pattan /* convert to number of cycles */ 1726594302c7SJames Poole timer_period = rte_get_timer_hz(); /* 1 second timeout */ 1727af75078fSIntel 1728af75078fSIntel for (j = 0; j < 2; j++) { 17297741e4cfSIntel for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { 1730af75078fSIntel for (rxq = 0; rxq < nb_rxq; rxq++) { 17317741e4cfSIntel port_id = fwd_ports_ids[rxp]; 1732f487715fSReshma Pattan /** 1733f487715fSReshma Pattan * testpmd can stuck in the below do while loop 1734f487715fSReshma Pattan * if rte_eth_rx_burst() always returns nonzero 1735f487715fSReshma Pattan * packets. So timer is added to exit this loop 1736f487715fSReshma Pattan * after 1sec timer expiry. 
1737f487715fSReshma Pattan */ 1738f487715fSReshma Pattan prev_tsc = rte_rdtsc(); 1739af75078fSIntel do { 17407741e4cfSIntel nb_rx = rte_eth_rx_burst(port_id, rxq, 1741013af9b6SIntel pkts_burst, MAX_PKT_BURST); 1742af75078fSIntel for (i = 0; i < nb_rx; i++) 1743af75078fSIntel rte_pktmbuf_free(pkts_burst[i]); 1744f487715fSReshma Pattan 1745f487715fSReshma Pattan cur_tsc = rte_rdtsc(); 1746f487715fSReshma Pattan diff_tsc = cur_tsc - prev_tsc; 1747f487715fSReshma Pattan timer_tsc += diff_tsc; 1748f487715fSReshma Pattan } while ((nb_rx > 0) && 1749f487715fSReshma Pattan (timer_tsc < timer_period)); 1750f487715fSReshma Pattan timer_tsc = 0; 1751af75078fSIntel } 1752af75078fSIntel } 1753af75078fSIntel rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ 1754af75078fSIntel } 1755af75078fSIntel } 1756af75078fSIntel 1757af75078fSIntel static void 1758af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) 1759af75078fSIntel { 1760af75078fSIntel struct fwd_stream **fsm; 1761af75078fSIntel streamid_t nb_fs; 1762af75078fSIntel streamid_t sm_id; 17637e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 17647e4441c8SRemy Horton uint64_t tics_per_1sec; 17657e4441c8SRemy Horton uint64_t tics_datum; 17667e4441c8SRemy Horton uint64_t tics_current; 17674918a357SXiaoyun Li uint16_t i, cnt_ports; 1768af75078fSIntel 17694918a357SXiaoyun Li cnt_ports = nb_ports; 17707e4441c8SRemy Horton tics_datum = rte_rdtsc(); 17717e4441c8SRemy Horton tics_per_1sec = rte_get_timer_hz(); 17727e4441c8SRemy Horton #endif 1773af75078fSIntel fsm = &fwd_streams[fc->stream_idx]; 1774af75078fSIntel nb_fs = fc->stream_nb; 1775af75078fSIntel do { 1776af75078fSIntel for (sm_id = 0; sm_id < nb_fs; sm_id++) 1777af75078fSIntel (*pkt_fwd)(fsm[sm_id]); 17787e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 1779e25e6c70SRemy Horton if (bitrate_enabled != 0 && 1780e25e6c70SRemy Horton bitrate_lcore_id == rte_lcore_id()) { 17817e4441c8SRemy Horton tics_current = rte_rdtsc(); 17827e4441c8SRemy Horton 
if (tics_current - tics_datum >= tics_per_1sec) { 17837e4441c8SRemy Horton /* Periodic bitrate calculation */ 17844918a357SXiaoyun Li for (i = 0; i < cnt_ports; i++) 1785e25e6c70SRemy Horton rte_stats_bitrate_calc(bitrate_data, 17864918a357SXiaoyun Li ports_ids[i]); 17877e4441c8SRemy Horton tics_datum = tics_current; 17887e4441c8SRemy Horton } 1789e25e6c70SRemy Horton } 17907e4441c8SRemy Horton #endif 179162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 179265eb1e54SPablo de Lara if (latencystats_enabled != 0 && 179365eb1e54SPablo de Lara latencystats_lcore_id == rte_lcore_id()) 179462d3216dSReshma Pattan rte_latencystats_update(); 179562d3216dSReshma Pattan #endif 179662d3216dSReshma Pattan 1797af75078fSIntel } while (! fc->stopped); 1798af75078fSIntel } 1799af75078fSIntel 1800af75078fSIntel static int 1801af75078fSIntel start_pkt_forward_on_core(void *fwd_arg) 1802af75078fSIntel { 1803af75078fSIntel run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, 1804af75078fSIntel cur_fwd_config.fwd_eng->packet_fwd); 1805af75078fSIntel return 0; 1806af75078fSIntel } 1807af75078fSIntel 1808af75078fSIntel /* 1809af75078fSIntel * Run the TXONLY packet forwarding engine to send a single burst of packets. 1810af75078fSIntel * Used to start communication flows in network loopback test configurations. 1811af75078fSIntel */ 1812af75078fSIntel static int 1813af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg) 1814af75078fSIntel { 1815af75078fSIntel struct fwd_lcore *fwd_lc; 1816af75078fSIntel struct fwd_lcore tmp_lcore; 1817af75078fSIntel 1818af75078fSIntel fwd_lc = (struct fwd_lcore *) fwd_arg; 1819af75078fSIntel tmp_lcore = *fwd_lc; 1820af75078fSIntel tmp_lcore.stopped = 1; 1821af75078fSIntel run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); 1822af75078fSIntel return 0; 1823af75078fSIntel } 1824af75078fSIntel 1825af75078fSIntel /* 1826af75078fSIntel * Launch packet forwarding: 1827af75078fSIntel * - Setup per-port forwarding context. 
1828af75078fSIntel * - launch logical cores with their forwarding configuration. 1829af75078fSIntel */ 1830af75078fSIntel static void 1831af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) 1832af75078fSIntel { 1833af75078fSIntel port_fwd_begin_t port_fwd_begin; 1834af75078fSIntel unsigned int i; 1835af75078fSIntel unsigned int lc_id; 1836af75078fSIntel int diag; 1837af75078fSIntel 1838af75078fSIntel port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; 1839af75078fSIntel if (port_fwd_begin != NULL) { 1840af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1841af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1842af75078fSIntel } 1843af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { 1844af75078fSIntel lc_id = fwd_lcores_cpuids[i]; 1845af75078fSIntel if ((interactive == 0) || (lc_id != rte_lcore_id())) { 1846af75078fSIntel fwd_lcores[i]->stopped = 0; 1847af75078fSIntel diag = rte_eal_remote_launch(pkt_fwd_on_lcore, 1848af75078fSIntel fwd_lcores[i], lc_id); 1849af75078fSIntel if (diag != 0) 1850af75078fSIntel printf("launch lcore %u failed - diag=%d\n", 1851af75078fSIntel lc_id, diag); 1852af75078fSIntel } 1853af75078fSIntel } 1854af75078fSIntel } 1855af75078fSIntel 1856af75078fSIntel /* 1857af75078fSIntel * Launch packet forwarding configuration. 
1858af75078fSIntel */ 1859af75078fSIntel void 1860af75078fSIntel start_packet_forwarding(int with_tx_first) 1861af75078fSIntel { 1862af75078fSIntel port_fwd_begin_t port_fwd_begin; 1863af75078fSIntel port_fwd_end_t port_fwd_end; 1864af75078fSIntel struct rte_port *port; 1865af75078fSIntel unsigned int i; 1866af75078fSIntel portid_t pt_id; 1867af75078fSIntel 18685a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) 18695a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); 18705a8fb55cSReshma Pattan 18715a8fb55cSReshma Pattan if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) 18725a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); 18735a8fb55cSReshma Pattan 18745a8fb55cSReshma Pattan if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && 18755a8fb55cSReshma Pattan strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && 18765a8fb55cSReshma Pattan (!nb_rxq || !nb_txq)) 18775a8fb55cSReshma Pattan rte_exit(EXIT_FAILURE, 18785a8fb55cSReshma Pattan "Either rxq or txq are 0, cannot use %s fwd mode\n", 18795a8fb55cSReshma Pattan cur_fwd_eng->fwd_mode_name); 18805a8fb55cSReshma Pattan 1881ce8d5614SIntel if (all_ports_started() == 0) { 1882ce8d5614SIntel printf("Not all ports were started\n"); 1883ce8d5614SIntel return; 1884ce8d5614SIntel } 1885af75078fSIntel if (test_done == 0) { 1886af75078fSIntel printf("Packet forwarding already started\n"); 1887af75078fSIntel return; 1888af75078fSIntel } 1889edf87b4aSBernard Iremonger 1890edf87b4aSBernard Iremonger 18917741e4cfSIntel if(dcb_test) { 18927741e4cfSIntel for (i = 0; i < nb_fwd_ports; i++) { 18937741e4cfSIntel pt_id = fwd_ports_ids[i]; 18947741e4cfSIntel port = &ports[pt_id]; 18957741e4cfSIntel if (!port->dcb_flag) { 18967741e4cfSIntel printf("In DCB mode, all forwarding ports must " 18977741e4cfSIntel "be configured in this mode.\n"); 1898013af9b6SIntel return; 1899013af9b6SIntel } 19007741e4cfSIntel } 
19017741e4cfSIntel if (nb_fwd_lcores == 1) { 19027741e4cfSIntel printf("In DCB mode,the nb forwarding cores " 19037741e4cfSIntel "should be larger than 1.\n"); 19047741e4cfSIntel return; 19057741e4cfSIntel } 19067741e4cfSIntel } 1907af75078fSIntel test_done = 0; 19087741e4cfSIntel 190947a767b2SMatan Azrad fwd_config_setup(); 191047a767b2SMatan Azrad 19117741e4cfSIntel if(!no_flush_rx) 19127741e4cfSIntel flush_fwd_rx_queues(); 19137741e4cfSIntel 1914933617d8SZhihong Wang pkt_fwd_config_display(&cur_fwd_config); 1915af75078fSIntel rxtx_config_display(); 1916af75078fSIntel 191753324971SDavid Marchand fwd_stats_reset(); 1918af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1919af75078fSIntel pt_id = fwd_ports_ids[i]; 1920af75078fSIntel port = &ports[pt_id]; 1921013af9b6SIntel map_port_queue_stats_mapping_registers(pt_id, port); 1922af75078fSIntel } 1923af75078fSIntel if (with_tx_first) { 1924af75078fSIntel port_fwd_begin = tx_only_engine.port_fwd_begin; 1925af75078fSIntel if (port_fwd_begin != NULL) { 1926af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1927af75078fSIntel (*port_fwd_begin)(fwd_ports_ids[i]); 1928af75078fSIntel } 1929acbf77a6SZhihong Wang while (with_tx_first--) { 1930acbf77a6SZhihong Wang launch_packet_forwarding( 1931acbf77a6SZhihong Wang run_one_txonly_burst_on_core); 1932af75078fSIntel rte_eal_mp_wait_lcore(); 1933acbf77a6SZhihong Wang } 1934af75078fSIntel port_fwd_end = tx_only_engine.port_fwd_end; 1935af75078fSIntel if (port_fwd_end != NULL) { 1936af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 1937af75078fSIntel (*port_fwd_end)(fwd_ports_ids[i]); 1938af75078fSIntel } 1939af75078fSIntel } 1940af75078fSIntel launch_packet_forwarding(start_pkt_forward_on_core); 1941af75078fSIntel } 1942af75078fSIntel 1943af75078fSIntel void 1944af75078fSIntel stop_packet_forwarding(void) 1945af75078fSIntel { 1946af75078fSIntel port_fwd_end_t port_fwd_end; 1947af75078fSIntel lcoreid_t lc_id; 194853324971SDavid 
Marchand portid_t pt_id; 194953324971SDavid Marchand int i; 1950af75078fSIntel 1951af75078fSIntel if (test_done) { 1952af75078fSIntel printf("Packet forwarding not started\n"); 1953af75078fSIntel return; 1954af75078fSIntel } 1955af75078fSIntel printf("Telling cores to stop..."); 1956af75078fSIntel for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) 1957af75078fSIntel fwd_lcores[lc_id]->stopped = 1; 1958af75078fSIntel printf("\nWaiting for lcores to finish...\n"); 1959af75078fSIntel rte_eal_mp_wait_lcore(); 1960af75078fSIntel port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; 1961af75078fSIntel if (port_fwd_end != NULL) { 1962af75078fSIntel for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 1963af75078fSIntel pt_id = fwd_ports_ids[i]; 1964af75078fSIntel (*port_fwd_end)(pt_id); 1965af75078fSIntel } 1966af75078fSIntel } 1967c185d42cSDavid Marchand 196853324971SDavid Marchand fwd_stats_display(); 196958d475b7SJerin Jacob 1970af75078fSIntel printf("\nDone.\n"); 1971af75078fSIntel test_done = 1; 1972af75078fSIntel } 1973af75078fSIntel 1974cfae07fdSOuyang Changchun void 1975cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid) 1976cfae07fdSOuyang Changchun { 1977492ab604SZhiyong Yang if (rte_eth_dev_set_link_up(pid) < 0) 1978cfae07fdSOuyang Changchun printf("\nSet link up fail.\n"); 1979cfae07fdSOuyang Changchun } 1980cfae07fdSOuyang Changchun 1981cfae07fdSOuyang Changchun void 1982cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid) 1983cfae07fdSOuyang Changchun { 1984492ab604SZhiyong Yang if (rte_eth_dev_set_link_down(pid) < 0) 1985cfae07fdSOuyang Changchun printf("\nSet link down fail.\n"); 1986cfae07fdSOuyang Changchun } 1987cfae07fdSOuyang Changchun 1988ce8d5614SIntel static int 1989ce8d5614SIntel all_ports_started(void) 1990ce8d5614SIntel { 1991ce8d5614SIntel portid_t pi; 1992ce8d5614SIntel struct rte_port *port; 1993ce8d5614SIntel 19947d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 1995ce8d5614SIntel port = &ports[pi]; 1996ce8d5614SIntel /* 
Check if there is a port which is not started */ 199741b05095SBernard Iremonger if ((port->port_status != RTE_PORT_STARTED) && 199841b05095SBernard Iremonger (port->slave_flag == 0)) 1999ce8d5614SIntel return 0; 2000ce8d5614SIntel } 2001ce8d5614SIntel 2002ce8d5614SIntel /* No port is not started */ 2003ce8d5614SIntel return 1; 2004ce8d5614SIntel } 2005ce8d5614SIntel 2006148f963fSBruce Richardson int 20076018eb8cSShahaf Shuler port_is_stopped(portid_t port_id) 20086018eb8cSShahaf Shuler { 20096018eb8cSShahaf Shuler struct rte_port *port = &ports[port_id]; 20106018eb8cSShahaf Shuler 20116018eb8cSShahaf Shuler if ((port->port_status != RTE_PORT_STOPPED) && 20126018eb8cSShahaf Shuler (port->slave_flag == 0)) 20136018eb8cSShahaf Shuler return 0; 20146018eb8cSShahaf Shuler return 1; 20156018eb8cSShahaf Shuler } 20166018eb8cSShahaf Shuler 20176018eb8cSShahaf Shuler int 2018edab33b1STetsuya Mukawa all_ports_stopped(void) 2019edab33b1STetsuya Mukawa { 2020edab33b1STetsuya Mukawa portid_t pi; 2021edab33b1STetsuya Mukawa 20227d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pi) { 20236018eb8cSShahaf Shuler if (!port_is_stopped(pi)) 2024edab33b1STetsuya Mukawa return 0; 2025edab33b1STetsuya Mukawa } 2026edab33b1STetsuya Mukawa 2027edab33b1STetsuya Mukawa return 1; 2028edab33b1STetsuya Mukawa } 2029edab33b1STetsuya Mukawa 2030edab33b1STetsuya Mukawa int 2031edab33b1STetsuya Mukawa port_is_started(portid_t port_id) 2032edab33b1STetsuya Mukawa { 2033edab33b1STetsuya Mukawa if (port_id_is_invalid(port_id, ENABLED_WARN)) 2034edab33b1STetsuya Mukawa return 0; 2035edab33b1STetsuya Mukawa 2036edab33b1STetsuya Mukawa if (ports[port_id].port_status != RTE_PORT_STARTED) 2037edab33b1STetsuya Mukawa return 0; 2038edab33b1STetsuya Mukawa 2039edab33b1STetsuya Mukawa return 1; 2040edab33b1STetsuya Mukawa } 2041edab33b1STetsuya Mukawa 2042edab33b1STetsuya Mukawa int 2043ce8d5614SIntel start_port(portid_t pid) 2044ce8d5614SIntel { 204592d2703eSMichael Qiu int diag, need_check_link_status = -1; 
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/*
		 * Atomically claim the port: only a STOPPED port may be
		 * transitioned to HANDLING; otherwise skip it.
		 */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			/* Rx/Tx dump callbacks are disabled while configuring. */
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/*
				 * Prefer the per-port Tx ring NUMA override
				 * when NUMA support is on; fall back to the
				 * port's own socket otherwise.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* Re-enable Rx/Tx dump callbacks at the current verbosity. */
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/*
	 * need_check_link_status: -1 = no matching port found,
	 * 0 = matched but none started, 1 = at least one started.
	 */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

/*
 * Stop port `pid`, or all ports when pid == RTE_PORT_ALL.
 * Ports still used for forwarding or acting as bonding slaves are
 * skipped with a message.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Leaving DCB test mode also clears the DCB configuration flag. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only a STARTED port may be claimed for stopping. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

/*
 * Compact `array` (of length *total) in place, dropping entries whose
 * port id is no longer valid, and update *total accordingly.
 */
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

/*
 * Drop detached/invalid ports from both the global port list and the
 * forwarding list, keeping nb_cfg_ports in sync with nb_fwd_ports.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}

/*
 * Close port `pid`, or all ports when pid == RTE_PORT_ALL: flush its
 * flow rules, rte_eth_dev_close() it and purge it from the port lists.
 * Forwarding ports and bonding slaves are skipped with a message.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/*
		 * cmpset(CLOSED, CLOSED) succeeding (== 1) is used purely
		 * as an atomic "is it already closed?" test.
		 */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

/*
 * Reset port `pid`, or all ports when pid == RTE_PORT_ALL, via
 * rte_eth_dev_reset(); on success flag the port for full
 * reconfiguration at the next start_port().
 */
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

/*
 * Probe and attach the device described by `identifier` (devargs
 * string). With setup_on_probe_event set, setup is deferred to the
 * RTE_ETH_EVENT_NEW handler; otherwise ports matching the identifier
 * are set up immediately via the ethdev iterator.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}

/*
 * Finish bringing up a newly probed port: pick a valid NUMA socket,
 * reconfigure, enable promiscuous mode and register the port in the
 * global and forwarding port lists (state -> RTE_PORT_STOPPED).
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

/*
 * Detach the rte_device backing `port_id` with rte_dev_remove().
 * The port must be stopped (closed preferred); all sibling ports of
 * the same device are forced to CLOSED and pruned from the port lists.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}

/*
 * Detach every device matching the devargs string `identifier`: close
 * its (stopped) ports, then hot-unplug via rte_eal_hotplug_remove().
 *
 * NOTE(review): da.args allocated by rte_devargs_parsef() is only
 * freed on the parse-failure path; the success and later error paths
 * appear to leak it — verify against rte_devargs ownership rules.
 */
void
detach_device(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("cannot parse identifier\n");
		if (da.args)
			free(da.args);
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				printf("Port %u not stopped\n", port_id);
				return;
			}

			/* sibling ports are forced to be closed */
			if (ports[port_id].flow_list)
				port_flow_flush(port_id);
			ports[port_id].port_status = RTE_PORT_CLOSED;
			printf("Port %u is now closed\n", port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
}

/*
 * Orderly shutdown on testpmd exit: stop forwarding, DMA-unmap
 * anonymous mempools, stop and close every port, tear down hotplug
 * monitoring if enabled, and free the per-socket mempools.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	int ret;
	int i;

	if (test_done == 0)
		stop_packet_forwarding();

	/* Unmap DMA for anonymous-memory mempools before closing ports. */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		/* Skip per-port link-status polling during shutdown. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
255308fd782bSCristian Dumitrescu printf("\nShutting down port %d...\n", pt_id); 255408fd782bSCristian Dumitrescu fflush(stdout); 2555d3a274ceSZhihong Wang close_port(pt_id); 2556af75078fSIntel } 2557d3a274ceSZhihong Wang } 2558fb73e096SJeff Guo 2559fb73e096SJeff Guo if (hot_plug) { 2560fb73e096SJeff Guo ret = rte_dev_event_monitor_stop(); 25612049c511SJeff Guo if (ret) { 2562fb73e096SJeff Guo RTE_LOG(ERR, EAL, 2563fb73e096SJeff Guo "fail to stop device event monitor."); 25642049c511SJeff Guo return; 25652049c511SJeff Guo } 2566fb73e096SJeff Guo 25672049c511SJeff Guo ret = rte_dev_event_callback_unregister(NULL, 2568cc1bf307SJeff Guo dev_event_callback, NULL); 25692049c511SJeff Guo if (ret < 0) { 2570fb73e096SJeff Guo RTE_LOG(ERR, EAL, 25712049c511SJeff Guo "fail to unregister device event callback.\n"); 25722049c511SJeff Guo return; 25732049c511SJeff Guo } 25742049c511SJeff Guo 25752049c511SJeff Guo ret = rte_dev_hotplug_handle_disable(); 25762049c511SJeff Guo if (ret) { 25772049c511SJeff Guo RTE_LOG(ERR, EAL, 25782049c511SJeff Guo "fail to disable hotplug handling.\n"); 25792049c511SJeff Guo return; 25802049c511SJeff Guo } 2581fb73e096SJeff Guo } 2582401b744dSShahaf Shuler for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { 2583401b744dSShahaf Shuler if (mempools[i]) 2584401b744dSShahaf Shuler rte_mempool_free(mempools[i]); 2585401b744dSShahaf Shuler } 2586fb73e096SJeff Guo 2587d3a274ceSZhihong Wang printf("\nBye...\n"); 2588af75078fSIntel } 2589af75078fSIntel 2590af75078fSIntel typedef void (*cmd_func_t)(void); 2591af75078fSIntel struct pmd_test_command { 2592af75078fSIntel const char *cmd_name; 2593af75078fSIntel cmd_func_t cmd_func; 2594af75078fSIntel }; 2595af75078fSIntel 2596af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) 2597af75078fSIntel 2598ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */ 2599af75078fSIntel static void 2600edab33b1STetsuya Mukawa 
check_all_ports_link_status(uint32_t port_mask) 2601af75078fSIntel { 2602ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */ 2603ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 2604f8244c63SZhiyong Yang portid_t portid; 2605f8244c63SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 2606ce8d5614SIntel struct rte_eth_link link; 2607ce8d5614SIntel 2608ce8d5614SIntel printf("Checking link statuses...\n"); 2609ce8d5614SIntel fflush(stdout); 2610ce8d5614SIntel for (count = 0; count <= MAX_CHECK_TIME; count++) { 2611ce8d5614SIntel all_ports_up = 1; 26127d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(portid) { 2613ce8d5614SIntel if ((port_mask & (1 << portid)) == 0) 2614ce8d5614SIntel continue; 2615ce8d5614SIntel memset(&link, 0, sizeof(link)); 2616ce8d5614SIntel rte_eth_link_get_nowait(portid, &link); 2617ce8d5614SIntel /* print link status if flag set */ 2618ce8d5614SIntel if (print_flag == 1) { 2619ce8d5614SIntel if (link.link_status) 2620f8244c63SZhiyong Yang printf( 2621f8244c63SZhiyong Yang "Port%d Link Up. speed %u Mbps- %s\n", 2622f8244c63SZhiyong Yang portid, link.link_speed, 2623ce8d5614SIntel (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
2624ce8d5614SIntel ("full-duplex") : ("half-duplex\n")); 2625ce8d5614SIntel else 2626f8244c63SZhiyong Yang printf("Port %d Link Down\n", portid); 2627ce8d5614SIntel continue; 2628ce8d5614SIntel } 2629ce8d5614SIntel /* clear all_ports_up flag if any link down */ 263009419f23SThomas Monjalon if (link.link_status == ETH_LINK_DOWN) { 2631ce8d5614SIntel all_ports_up = 0; 2632ce8d5614SIntel break; 2633ce8d5614SIntel } 2634ce8d5614SIntel } 2635ce8d5614SIntel /* after finally printing all link status, get out */ 2636ce8d5614SIntel if (print_flag == 1) 2637ce8d5614SIntel break; 2638ce8d5614SIntel 2639ce8d5614SIntel if (all_ports_up == 0) { 2640ce8d5614SIntel fflush(stdout); 2641ce8d5614SIntel rte_delay_ms(CHECK_INTERVAL); 2642ce8d5614SIntel } 2643ce8d5614SIntel 2644ce8d5614SIntel /* set the print_flag if all ports up or timeout */ 2645ce8d5614SIntel if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 2646ce8d5614SIntel print_flag = 1; 2647ce8d5614SIntel } 26488ea656f8SGaetan Rivet 26498ea656f8SGaetan Rivet if (lsc_interrupt) 26508ea656f8SGaetan Rivet break; 2651ce8d5614SIntel } 2652af75078fSIntel } 2653af75078fSIntel 2654cc1bf307SJeff Guo /* 2655cc1bf307SJeff Guo * This callback is for remove a port for a device. It has limitation because 2656cc1bf307SJeff Guo * it is not for multiple port removal for a device. 2657cc1bf307SJeff Guo * TODO: the device detach invoke will plan to be removed from user side to 2658cc1bf307SJeff Guo * eal. And convert all PMDs to free port resources on ether device closing. 
2659cc1bf307SJeff Guo */ 2660284c908cSGaetan Rivet static void 2661cc1bf307SJeff Guo rmv_port_callback(void *arg) 2662284c908cSGaetan Rivet { 26633b97888aSMatan Azrad int need_to_start = 0; 26640da2a62bSMatan Azrad int org_no_link_check = no_link_check; 266528caa76aSZhiyong Yang portid_t port_id = (intptr_t)arg; 2666284c908cSGaetan Rivet 2667284c908cSGaetan Rivet RTE_ETH_VALID_PORTID_OR_RET(port_id); 2668284c908cSGaetan Rivet 26693b97888aSMatan Azrad if (!test_done && port_is_forwarding(port_id)) { 26703b97888aSMatan Azrad need_to_start = 1; 26713b97888aSMatan Azrad stop_packet_forwarding(); 26723b97888aSMatan Azrad } 26730da2a62bSMatan Azrad no_link_check = 1; 2674284c908cSGaetan Rivet stop_port(port_id); 26750da2a62bSMatan Azrad no_link_check = org_no_link_check; 2676284c908cSGaetan Rivet close_port(port_id); 2677f8e5baa2SThomas Monjalon detach_port_device(port_id); 26783b97888aSMatan Azrad if (need_to_start) 26793b97888aSMatan Azrad start_packet_forwarding(0); 2680284c908cSGaetan Rivet } 2681284c908cSGaetan Rivet 268276ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */ 2683d6af1a13SBernard Iremonger static int 2684f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, 2685d6af1a13SBernard Iremonger void *ret_param) 268676ad4a2dSGaetan Rivet { 268776ad4a2dSGaetan Rivet RTE_SET_USED(param); 2688d6af1a13SBernard Iremonger RTE_SET_USED(ret_param); 268976ad4a2dSGaetan Rivet 269076ad4a2dSGaetan Rivet if (type >= RTE_ETH_EVENT_MAX) { 2691f431e010SHerakliusz Lipiec fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", 269276ad4a2dSGaetan Rivet port_id, __func__, type); 269376ad4a2dSGaetan Rivet fflush(stderr); 26943af72783SGaetan Rivet } else if (event_print_mask & (UINT32_C(1) << type)) { 2695f431e010SHerakliusz Lipiec printf("\nPort %" PRIu16 ": %s event\n", port_id, 269697b5d8b5SThomas Monjalon eth_event_desc[type]); 269776ad4a2dSGaetan Rivet fflush(stdout); 269876ad4a2dSGaetan 
Rivet } 2699284c908cSGaetan Rivet 2700284c908cSGaetan Rivet switch (type) { 27014f1ed78eSThomas Monjalon case RTE_ETH_EVENT_NEW: 27024f1ed78eSThomas Monjalon ports[port_id].need_setup = 1; 27034f1ed78eSThomas Monjalon ports[port_id].port_status = RTE_PORT_HANDLING; 27044f1ed78eSThomas Monjalon break; 2705284c908cSGaetan Rivet case RTE_ETH_EVENT_INTR_RMV: 27064f1ed78eSThomas Monjalon if (port_id_is_invalid(port_id, DISABLED_WARN)) 27074f1ed78eSThomas Monjalon break; 2708284c908cSGaetan Rivet if (rte_eal_alarm_set(100000, 2709cc1bf307SJeff Guo rmv_port_callback, (void *)(intptr_t)port_id)) 2710284c908cSGaetan Rivet fprintf(stderr, "Could not set up deferred device removal\n"); 2711284c908cSGaetan Rivet break; 2712284c908cSGaetan Rivet default: 2713284c908cSGaetan Rivet break; 2714284c908cSGaetan Rivet } 2715d6af1a13SBernard Iremonger return 0; 271676ad4a2dSGaetan Rivet } 271776ad4a2dSGaetan Rivet 271897b5d8b5SThomas Monjalon static int 271997b5d8b5SThomas Monjalon register_eth_event_callback(void) 272097b5d8b5SThomas Monjalon { 272197b5d8b5SThomas Monjalon int ret; 272297b5d8b5SThomas Monjalon enum rte_eth_event_type event; 272397b5d8b5SThomas Monjalon 272497b5d8b5SThomas Monjalon for (event = RTE_ETH_EVENT_UNKNOWN; 272597b5d8b5SThomas Monjalon event < RTE_ETH_EVENT_MAX; event++) { 272697b5d8b5SThomas Monjalon ret = rte_eth_dev_callback_register(RTE_ETH_ALL, 272797b5d8b5SThomas Monjalon event, 272897b5d8b5SThomas Monjalon eth_event_callback, 272997b5d8b5SThomas Monjalon NULL); 273097b5d8b5SThomas Monjalon if (ret != 0) { 273197b5d8b5SThomas Monjalon TESTPMD_LOG(ERR, "Failed to register callback for " 273297b5d8b5SThomas Monjalon "%s event\n", eth_event_desc[event]); 273397b5d8b5SThomas Monjalon return -1; 273497b5d8b5SThomas Monjalon } 273597b5d8b5SThomas Monjalon } 273697b5d8b5SThomas Monjalon 273797b5d8b5SThomas Monjalon return 0; 273897b5d8b5SThomas Monjalon } 273997b5d8b5SThomas Monjalon 2740fb73e096SJeff Guo /* This function is used by the interrupt thread */ 
/*
 * EAL device (hotplug) event handler, invoked in the interrupt thread.
 * On REMOVE it resolves the port by device name and schedules a
 * deferred rmv_port_callback(); ADD is currently only logged.
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
	     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}

/*
 * Apply the user-supplied Tx queue -> stats-counter mappings for
 * `port_id`. Returns 0 on success (setting
 * tx_queue_stats_mapping_enabled when at least one mapping applied)
 * or the rte_eth_dev_set_tx_queue_stats_mapping() error code.
 */
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

/*
 * Rx-side counterpart of set_tx_queue_stats_mapping_registers();
 * same contract, using the rx mapping table and nb_rxq.
 */
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

/*
 * Program Tx then Rx queue-stats mappings for port `pi`.
 * -ENOTSUP from the driver merely disables the feature with a message;
 * any other error aborts testpmd via rte_exit().
 */
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

/*
 * Reset each Rx/Tx queue config of `port` to the driver defaults
 * (preserving already-set per-queue offloads), then overlay any
 * command-line threshold/descriptor parameters.
 */
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		/* Keep user offloads across the reset to driver defaults. */
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

2905d44f8a48SQi Zhang /* Check if any Tx parameters have been passed */ 2906f2c5125aSPablo de Lara if (tx_pthresh != RTE_PMD_PARAM_UNSET) 2907d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; 2908f2c5125aSPablo de Lara 2909f2c5125aSPablo de Lara if (tx_hthresh != RTE_PMD_PARAM_UNSET) 2910d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; 2911f2c5125aSPablo de Lara 2912f2c5125aSPablo de Lara if (tx_wthresh != RTE_PMD_PARAM_UNSET) 2913d44f8a48SQi Zhang port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; 2914f2c5125aSPablo de Lara 2915f2c5125aSPablo de Lara if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) 2916d44f8a48SQi Zhang port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; 2917f2c5125aSPablo de Lara 2918f2c5125aSPablo de Lara if (tx_free_thresh != RTE_PMD_PARAM_UNSET) 2919d44f8a48SQi Zhang port->tx_conf[qid].tx_free_thresh = tx_free_thresh; 2920d44f8a48SQi Zhang 2921d44f8a48SQi Zhang port->nb_tx_desc[qid] = nb_txd; 2922d44f8a48SQi Zhang } 2923f2c5125aSPablo de Lara } 2924f2c5125aSPablo de Lara 2925013af9b6SIntel void 2926013af9b6SIntel init_port_config(void) 2927013af9b6SIntel { 2928013af9b6SIntel portid_t pid; 2929013af9b6SIntel struct rte_port *port; 2930013af9b6SIntel 29317d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(pid) { 2932013af9b6SIntel port = &ports[pid]; 2933013af9b6SIntel port->dev_conf.fdir_conf = fdir_conf; 2934422515b9SAdrien Mazarguil rte_eth_dev_info_get(pid, &port->dev_info); 29353ce690d3SBruce Richardson if (nb_rxq > 1) { 2936013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 293790892962SQi Zhang port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 2938422515b9SAdrien Mazarguil rss_hf & port->dev_info.flow_type_rss_offloads; 2939af75078fSIntel } else { 2940013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; 2941013af9b6SIntel port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; 2942af75078fSIntel } 29433ce690d3SBruce Richardson 29445f592039SJingjing Wu if (port->dcb_flag == 0) { 29453ce690d3SBruce 
Richardson if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) 29463ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; 29473ce690d3SBruce Richardson else 29483ce690d3SBruce Richardson port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; 29493ce690d3SBruce Richardson } 29503ce690d3SBruce Richardson 2951f2c5125aSPablo de Lara rxtx_port_config(port); 2952013af9b6SIntel 2953013af9b6SIntel rte_eth_macaddr_get(pid, &port->eth_addr); 2954013af9b6SIntel 2955013af9b6SIntel map_port_queue_stats_mapping_registers(pid, port); 295650c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS 2957e261265eSRadu Nicolau rte_pmd_ixgbe_bypass_init(pid); 29587b7e5ba7SIntel #endif 29598ea656f8SGaetan Rivet 29608ea656f8SGaetan Rivet if (lsc_interrupt && 29618ea656f8SGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 29628ea656f8SGaetan Rivet RTE_ETH_DEV_INTR_LSC)) 29638ea656f8SGaetan Rivet port->dev_conf.intr_conf.lsc = 1; 2964284c908cSGaetan Rivet if (rmv_interrupt && 2965284c908cSGaetan Rivet (rte_eth_devices[pid].data->dev_flags & 2966284c908cSGaetan Rivet RTE_ETH_DEV_INTR_RMV)) 2967284c908cSGaetan Rivet port->dev_conf.intr_conf.rmv = 1; 2968013af9b6SIntel } 2969013af9b6SIntel } 2970013af9b6SIntel 297141b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid) 297241b05095SBernard Iremonger { 297341b05095SBernard Iremonger struct rte_port *port; 297441b05095SBernard Iremonger 297541b05095SBernard Iremonger port = &ports[slave_pid]; 297641b05095SBernard Iremonger port->slave_flag = 1; 297741b05095SBernard Iremonger } 297841b05095SBernard Iremonger 297941b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid) 298041b05095SBernard Iremonger { 298141b05095SBernard Iremonger struct rte_port *port; 298241b05095SBernard Iremonger 298341b05095SBernard Iremonger port = &ports[slave_pid]; 298441b05095SBernard Iremonger port->slave_flag = 0; 298541b05095SBernard Iremonger } 298641b05095SBernard Iremonger 
29870e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid) 29880e545d30SBernard Iremonger { 29890e545d30SBernard Iremonger struct rte_port *port; 29900e545d30SBernard Iremonger 29910e545d30SBernard Iremonger port = &ports[slave_pid]; 2992b8b8b344SMatan Azrad if ((rte_eth_devices[slave_pid].data->dev_flags & 2993b8b8b344SMatan Azrad RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) 2994b8b8b344SMatan Azrad return 1; 2995b8b8b344SMatan Azrad return 0; 29960e545d30SBernard Iremonger } 29970e545d30SBernard Iremonger 2998013af9b6SIntel const uint16_t vlan_tags[] = { 2999013af9b6SIntel 0, 1, 2, 3, 4, 5, 6, 7, 3000013af9b6SIntel 8, 9, 10, 11, 12, 13, 14, 15, 3001013af9b6SIntel 16, 17, 18, 19, 20, 21, 22, 23, 3002013af9b6SIntel 24, 25, 26, 27, 28, 29, 30, 31 3003013af9b6SIntel }; 3004013af9b6SIntel 3005013af9b6SIntel static int 3006ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, 30071a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 30081a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 30091a572499SJingjing Wu uint8_t pfc_en) 3010013af9b6SIntel { 3011013af9b6SIntel uint8_t i; 3012ac7c491cSKonstantin Ananyev int32_t rc; 3013ac7c491cSKonstantin Ananyev struct rte_eth_rss_conf rss_conf; 3014af75078fSIntel 3015af75078fSIntel /* 3016013af9b6SIntel * Builds up the correct configuration for dcb+vt based on the vlan tags array 3017013af9b6SIntel * given above, and the number of traffic classes available for use. 
3018af75078fSIntel */ 30191a572499SJingjing Wu if (dcb_mode == DCB_VT_ENABLED) { 30201a572499SJingjing Wu struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 30211a572499SJingjing Wu ð_conf->rx_adv_conf.vmdq_dcb_conf; 30221a572499SJingjing Wu struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = 30231a572499SJingjing Wu ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; 3024013af9b6SIntel 3025547d946cSNirmoy Das /* VMDQ+DCB RX and TX configurations */ 30261a572499SJingjing Wu vmdq_rx_conf->enable_default_pool = 0; 30271a572499SJingjing Wu vmdq_rx_conf->default_pool = 0; 30281a572499SJingjing Wu vmdq_rx_conf->nb_queue_pools = 30291a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 30301a572499SJingjing Wu vmdq_tx_conf->nb_queue_pools = 30311a572499SJingjing Wu (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); 3032013af9b6SIntel 30331a572499SJingjing Wu vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; 30341a572499SJingjing Wu for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { 30351a572499SJingjing Wu vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; 30361a572499SJingjing Wu vmdq_rx_conf->pool_map[i].pools = 30371a572499SJingjing Wu 1 << (i % vmdq_rx_conf->nb_queue_pools); 3038af75078fSIntel } 3039013af9b6SIntel for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3040f59908feSWei Dai vmdq_rx_conf->dcb_tc[i] = i % num_tcs; 3041f59908feSWei Dai vmdq_tx_conf->dcb_tc[i] = i % num_tcs; 3042013af9b6SIntel } 3043013af9b6SIntel 3044013af9b6SIntel /* set DCB mode of RX and TX of multiple queues */ 304532e7aa0bSIntel eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB; 304632e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 30471a572499SJingjing Wu } else { 30481a572499SJingjing Wu struct rte_eth_dcb_rx_conf *rx_conf = 30491a572499SJingjing Wu ð_conf->rx_adv_conf.dcb_rx_conf; 30501a572499SJingjing Wu struct rte_eth_dcb_tx_conf *tx_conf = 30511a572499SJingjing Wu ð_conf->tx_adv_conf.dcb_tx_conf; 3052013af9b6SIntel 3053ac7c491cSKonstantin Ananyev rc = 
rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); 3054ac7c491cSKonstantin Ananyev if (rc != 0) 3055ac7c491cSKonstantin Ananyev return rc; 3056ac7c491cSKonstantin Ananyev 30571a572499SJingjing Wu rx_conf->nb_tcs = num_tcs; 30581a572499SJingjing Wu tx_conf->nb_tcs = num_tcs; 30591a572499SJingjing Wu 3060bcd0e432SJingjing Wu for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { 3061bcd0e432SJingjing Wu rx_conf->dcb_tc[i] = i % num_tcs; 3062bcd0e432SJingjing Wu tx_conf->dcb_tc[i] = i % num_tcs; 3063013af9b6SIntel } 3064ac7c491cSKonstantin Ananyev 30651a572499SJingjing Wu eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS; 3066ac7c491cSKonstantin Ananyev eth_conf->rx_adv_conf.rss_conf = rss_conf; 306732e7aa0bSIntel eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; 30681a572499SJingjing Wu } 30691a572499SJingjing Wu 30701a572499SJingjing Wu if (pfc_en) 30711a572499SJingjing Wu eth_conf->dcb_capability_en = 30721a572499SJingjing Wu ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; 3073013af9b6SIntel else 3074013af9b6SIntel eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; 3075013af9b6SIntel 3076013af9b6SIntel return 0; 3077013af9b6SIntel } 3078013af9b6SIntel 3079013af9b6SIntel int 30801a572499SJingjing Wu init_port_dcb_config(portid_t pid, 30811a572499SJingjing Wu enum dcb_mode_enable dcb_mode, 30821a572499SJingjing Wu enum rte_eth_nb_tcs num_tcs, 30831a572499SJingjing Wu uint8_t pfc_en) 3084013af9b6SIntel { 3085013af9b6SIntel struct rte_eth_conf port_conf; 3086013af9b6SIntel struct rte_port *rte_port; 3087013af9b6SIntel int retval; 3088013af9b6SIntel uint16_t i; 3089013af9b6SIntel 30902a977b89SWenzhuo Lu rte_port = &ports[pid]; 3091013af9b6SIntel 3092013af9b6SIntel memset(&port_conf, 0, sizeof(struct rte_eth_conf)); 3093013af9b6SIntel /* Enter DCB configuration status */ 3094013af9b6SIntel dcb_config = 1; 3095013af9b6SIntel 3096d5354e89SYanglong Wu port_conf.rxmode = rte_port->dev_conf.rxmode; 3097d5354e89SYanglong Wu port_conf.txmode = rte_port->dev_conf.txmode; 3098d5354e89SYanglong Wu 
3099013af9b6SIntel /*set configuration of DCB in vt mode and DCB in non-vt mode*/ 3100ac7c491cSKonstantin Ananyev retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); 3101013af9b6SIntel if (retval < 0) 3102013af9b6SIntel return retval; 31030074d02fSShahaf Shuler port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3104013af9b6SIntel 31052f203d44SQi Zhang /* re-configure the device . */ 31062b0e0ebaSChenbo Xia retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); 31072b0e0ebaSChenbo Xia if (retval < 0) 31082b0e0ebaSChenbo Xia return retval; 31092a977b89SWenzhuo Lu rte_eth_dev_info_get(pid, &rte_port->dev_info); 31102a977b89SWenzhuo Lu 31112a977b89SWenzhuo Lu /* If dev_info.vmdq_pool_base is greater than 0, 31122a977b89SWenzhuo Lu * the queue id of vmdq pools is started after pf queues. 31132a977b89SWenzhuo Lu */ 31142a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED && 31152a977b89SWenzhuo Lu rte_port->dev_info.vmdq_pool_base > 0) { 31162a977b89SWenzhuo Lu printf("VMDQ_DCB multi-queue mode is nonsensical" 31172a977b89SWenzhuo Lu " for port %d.", pid); 31182a977b89SWenzhuo Lu return -1; 31192a977b89SWenzhuo Lu } 31202a977b89SWenzhuo Lu 31212a977b89SWenzhuo Lu /* Assume the ports in testpmd have the same dcb capability 31222a977b89SWenzhuo Lu * and has the same number of rxq and txq in dcb mode 31232a977b89SWenzhuo Lu */ 31242a977b89SWenzhuo Lu if (dcb_mode == DCB_VT_ENABLED) { 312586ef65eeSBernard Iremonger if (rte_port->dev_info.max_vfs > 0) { 312686ef65eeSBernard Iremonger nb_rxq = rte_port->dev_info.nb_rx_queues; 312786ef65eeSBernard Iremonger nb_txq = rte_port->dev_info.nb_tx_queues; 312886ef65eeSBernard Iremonger } else { 31292a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 31302a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 313186ef65eeSBernard Iremonger } 31322a977b89SWenzhuo Lu } else { 31332a977b89SWenzhuo Lu /*if vt is disabled, use all pf queues */ 31342a977b89SWenzhuo Lu if 
(rte_port->dev_info.vmdq_pool_base == 0) { 31352a977b89SWenzhuo Lu nb_rxq = rte_port->dev_info.max_rx_queues; 31362a977b89SWenzhuo Lu nb_txq = rte_port->dev_info.max_tx_queues; 31372a977b89SWenzhuo Lu } else { 31382a977b89SWenzhuo Lu nb_rxq = (queueid_t)num_tcs; 31392a977b89SWenzhuo Lu nb_txq = (queueid_t)num_tcs; 31402a977b89SWenzhuo Lu 31412a977b89SWenzhuo Lu } 31422a977b89SWenzhuo Lu } 31432a977b89SWenzhuo Lu rx_free_thresh = 64; 31442a977b89SWenzhuo Lu 3145013af9b6SIntel memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); 3146013af9b6SIntel 3147f2c5125aSPablo de Lara rxtx_port_config(rte_port); 3148013af9b6SIntel /* VLAN filter */ 31490074d02fSShahaf Shuler rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 31501a572499SJingjing Wu for (i = 0; i < RTE_DIM(vlan_tags); i++) 3151013af9b6SIntel rx_vft_set(pid, vlan_tags[i], 1); 3152013af9b6SIntel 3153013af9b6SIntel rte_eth_macaddr_get(pid, &rte_port->eth_addr); 3154013af9b6SIntel map_port_queue_stats_mapping_registers(pid, rte_port); 3155013af9b6SIntel 31567741e4cfSIntel rte_port->dcb_flag = 1; 31577741e4cfSIntel 3158013af9b6SIntel return 0; 3159af75078fSIntel } 3160af75078fSIntel 3161ffc468ffSTetsuya Mukawa static void 3162ffc468ffSTetsuya Mukawa init_port(void) 3163ffc468ffSTetsuya Mukawa { 3164ffc468ffSTetsuya Mukawa /* Configuration of Ethernet ports. 
*/ 3165ffc468ffSTetsuya Mukawa ports = rte_zmalloc("testpmd: ports", 3166ffc468ffSTetsuya Mukawa sizeof(struct rte_port) * RTE_MAX_ETHPORTS, 3167ffc468ffSTetsuya Mukawa RTE_CACHE_LINE_SIZE); 3168ffc468ffSTetsuya Mukawa if (ports == NULL) { 3169ffc468ffSTetsuya Mukawa rte_exit(EXIT_FAILURE, 3170ffc468ffSTetsuya Mukawa "rte_zmalloc(%d struct rte_port) failed\n", 3171ffc468ffSTetsuya Mukawa RTE_MAX_ETHPORTS); 3172ffc468ffSTetsuya Mukawa } 317329841336SPhil Yang 317429841336SPhil Yang /* Initialize ports NUMA structures */ 317529841336SPhil Yang memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 317629841336SPhil Yang memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 317729841336SPhil Yang memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); 3178ffc468ffSTetsuya Mukawa } 3179ffc468ffSTetsuya Mukawa 3180d3a274ceSZhihong Wang static void 3181d3a274ceSZhihong Wang force_quit(void) 3182d3a274ceSZhihong Wang { 3183d3a274ceSZhihong Wang pmd_test_exit(); 3184d3a274ceSZhihong Wang prompt_exit(); 3185d3a274ceSZhihong Wang } 3186d3a274ceSZhihong Wang 3187d3a274ceSZhihong Wang static void 3188cfea1f30SPablo de Lara print_stats(void) 3189cfea1f30SPablo de Lara { 3190cfea1f30SPablo de Lara uint8_t i; 3191cfea1f30SPablo de Lara const char clr[] = { 27, '[', '2', 'J', '\0' }; 3192cfea1f30SPablo de Lara const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; 3193cfea1f30SPablo de Lara 3194cfea1f30SPablo de Lara /* Clear screen and move to top left */ 3195cfea1f30SPablo de Lara printf("%s%s", clr, top_left); 3196cfea1f30SPablo de Lara 3197cfea1f30SPablo de Lara printf("\nPort statistics ===================================="); 3198cfea1f30SPablo de Lara for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) 3199cfea1f30SPablo de Lara nic_stats_display(fwd_ports_ids[i]); 3200683d1e82SIgor Romanov 3201683d1e82SIgor Romanov fflush(stdout); 3202cfea1f30SPablo de Lara } 3203cfea1f30SPablo de Lara 3204cfea1f30SPablo de Lara static void 3205d3a274ceSZhihong Wang signal_handler(int 
signum) 3206d3a274ceSZhihong Wang { 3207d3a274ceSZhihong Wang if (signum == SIGINT || signum == SIGTERM) { 3208d3a274ceSZhihong Wang printf("\nSignal %d received, preparing to exit...\n", 3209d3a274ceSZhihong Wang signum); 3210102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP 3211102b7329SReshma Pattan /* uninitialize packet capture framework */ 3212102b7329SReshma Pattan rte_pdump_uninit(); 3213102b7329SReshma Pattan #endif 321462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 321562d3216dSReshma Pattan rte_latencystats_uninit(); 321662d3216dSReshma Pattan #endif 3217d3a274ceSZhihong Wang force_quit(); 3218d9a191a0SPhil Yang /* Set flag to indicate the force termination. */ 3219d9a191a0SPhil Yang f_quit = 1; 3220d3a274ceSZhihong Wang /* exit with the expected status */ 3221d3a274ceSZhihong Wang signal(signum, SIG_DFL); 3222d3a274ceSZhihong Wang kill(getpid(), signum); 3223d3a274ceSZhihong Wang } 3224d3a274ceSZhihong Wang } 3225d3a274ceSZhihong Wang 3226af75078fSIntel int 3227af75078fSIntel main(int argc, char** argv) 3228af75078fSIntel { 3229af75078fSIntel int diag; 3230f8244c63SZhiyong Yang portid_t port_id; 32314918a357SXiaoyun Li uint16_t count; 3232fb73e096SJeff Guo int ret; 3233af75078fSIntel 3234d3a274ceSZhihong Wang signal(SIGINT, signal_handler); 3235d3a274ceSZhihong Wang signal(SIGTERM, signal_handler); 3236d3a274ceSZhihong Wang 3237285fd101SOlivier Matz testpmd_logtype = rte_log_register("testpmd"); 3238285fd101SOlivier Matz if (testpmd_logtype < 0) 3239285fd101SOlivier Matz rte_panic("Cannot register log type"); 3240285fd101SOlivier Matz rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); 3241285fd101SOlivier Matz 32429201806eSStephen Hemminger diag = rte_eal_init(argc, argv); 32439201806eSStephen Hemminger if (diag < 0) 32449201806eSStephen Hemminger rte_panic("Cannot init EAL\n"); 32459201806eSStephen Hemminger 3246*a87ab9f7SStephen Hemminger if (rte_eal_process_type() == RTE_PROC_SECONDARY) 3247*a87ab9f7SStephen Hemminger rte_panic("Secondary 
process type not supported.\n"); 3248*a87ab9f7SStephen Hemminger 324997b5d8b5SThomas Monjalon ret = register_eth_event_callback(); 325097b5d8b5SThomas Monjalon if (ret != 0) 325197b5d8b5SThomas Monjalon rte_panic("Cannot register for ethdev events"); 325297b5d8b5SThomas Monjalon 32534aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP 32544aa0d012SAnatoly Burakov /* initialize packet capture framework */ 3255e9436f54STiwei Bie rte_pdump_init(); 32564aa0d012SAnatoly Burakov #endif 32574aa0d012SAnatoly Burakov 32584918a357SXiaoyun Li count = 0; 32594918a357SXiaoyun Li RTE_ETH_FOREACH_DEV(port_id) { 32604918a357SXiaoyun Li ports_ids[count] = port_id; 32614918a357SXiaoyun Li count++; 32624918a357SXiaoyun Li } 32634918a357SXiaoyun Li nb_ports = (portid_t) count; 32644aa0d012SAnatoly Burakov if (nb_ports == 0) 32654aa0d012SAnatoly Burakov TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); 32664aa0d012SAnatoly Burakov 32674aa0d012SAnatoly Burakov /* allocate port structures, and init them */ 32684aa0d012SAnatoly Burakov init_port(); 32694aa0d012SAnatoly Burakov 32704aa0d012SAnatoly Burakov set_def_fwd_config(); 32714aa0d012SAnatoly Burakov if (nb_lcores == 0) 32724aa0d012SAnatoly Burakov rte_panic("Empty set of forwarding logical cores - check the " 32734aa0d012SAnatoly Burakov "core mask supplied in the command parameters\n"); 32744aa0d012SAnatoly Burakov 3275e505d84cSAnatoly Burakov /* Bitrate/latency stats disabled by default */ 3276e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE 3277e505d84cSAnatoly Burakov bitrate_enabled = 0; 3278e505d84cSAnatoly Burakov #endif 3279e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS 3280e505d84cSAnatoly Burakov latencystats_enabled = 0; 3281e505d84cSAnatoly Burakov #endif 3282e505d84cSAnatoly Burakov 3283fb7b8b32SAnatoly Burakov /* on FreeBSD, mlockall() is disabled by default */ 32845fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD 3285fb7b8b32SAnatoly Burakov do_mlockall = 0; 3286fb7b8b32SAnatoly Burakov #else 
3287fb7b8b32SAnatoly Burakov do_mlockall = 1; 3288fb7b8b32SAnatoly Burakov #endif 3289fb7b8b32SAnatoly Burakov 3290e505d84cSAnatoly Burakov argc -= diag; 3291e505d84cSAnatoly Burakov argv += diag; 3292e505d84cSAnatoly Burakov if (argc > 1) 3293e505d84cSAnatoly Burakov launch_args_parse(argc, argv); 3294e505d84cSAnatoly Burakov 3295e505d84cSAnatoly Burakov if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { 3296285fd101SOlivier Matz TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", 32971c036b16SEelco Chaudron strerror(errno)); 32981c036b16SEelco Chaudron } 32991c036b16SEelco Chaudron 330099cabef0SPablo de Lara if (tx_first && interactive) 330199cabef0SPablo de Lara rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " 330299cabef0SPablo de Lara "interactive mode.\n"); 33038820cba4SDavid Hunt 33048820cba4SDavid Hunt if (tx_first && lsc_interrupt) { 33058820cba4SDavid Hunt printf("Warning: lsc_interrupt needs to be off when " 33068820cba4SDavid Hunt " using tx_first. Disabling.\n"); 33078820cba4SDavid Hunt lsc_interrupt = 0; 33088820cba4SDavid Hunt } 33098820cba4SDavid Hunt 33105a8fb55cSReshma Pattan if (!nb_rxq && !nb_txq) 33115a8fb55cSReshma Pattan printf("Warning: Either rx or tx queues should be non-zero\n"); 33125a8fb55cSReshma Pattan 33135a8fb55cSReshma Pattan if (nb_rxq > 1 && nb_rxq > nb_txq) 3314af75078fSIntel printf("Warning: nb_rxq=%d enables RSS configuration, " 3315af75078fSIntel "but nb_txq=%d will prevent to fully test it.\n", 3316af75078fSIntel nb_rxq, nb_txq); 3317af75078fSIntel 3318af75078fSIntel init_config(); 3319fb73e096SJeff Guo 3320fb73e096SJeff Guo if (hot_plug) { 33212049c511SJeff Guo ret = rte_dev_hotplug_handle_enable(); 3322fb73e096SJeff Guo if (ret) { 33232049c511SJeff Guo RTE_LOG(ERR, EAL, 33242049c511SJeff Guo "fail to enable hotplug handling."); 3325fb73e096SJeff Guo return -1; 3326fb73e096SJeff Guo } 3327fb73e096SJeff Guo 33282049c511SJeff Guo ret = rte_dev_event_monitor_start(); 33292049c511SJeff Guo if (ret) { 
33302049c511SJeff Guo RTE_LOG(ERR, EAL, 33312049c511SJeff Guo "fail to start device event monitoring."); 33322049c511SJeff Guo return -1; 33332049c511SJeff Guo } 33342049c511SJeff Guo 33352049c511SJeff Guo ret = rte_dev_event_callback_register(NULL, 3336cc1bf307SJeff Guo dev_event_callback, NULL); 33372049c511SJeff Guo if (ret) { 33382049c511SJeff Guo RTE_LOG(ERR, EAL, 33392049c511SJeff Guo "fail to register device event callback\n"); 33402049c511SJeff Guo return -1; 33412049c511SJeff Guo } 3342fb73e096SJeff Guo } 3343fb73e096SJeff Guo 33446937d210SStephen Hemminger if (!no_device_start && start_port(RTE_PORT_ALL) != 0) 3345148f963fSBruce Richardson rte_exit(EXIT_FAILURE, "Start ports failed\n"); 3346af75078fSIntel 3347ce8d5614SIntel /* set all ports to promiscuous mode by default */ 33487d89b261SGaetan Rivet RTE_ETH_FOREACH_DEV(port_id) 3349ce8d5614SIntel rte_eth_promiscuous_enable(port_id); 3350af75078fSIntel 33517e4441c8SRemy Horton /* Init metrics library */ 33527e4441c8SRemy Horton rte_metrics_init(rte_socket_id()); 33537e4441c8SRemy Horton 335462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS 335562d3216dSReshma Pattan if (latencystats_enabled != 0) { 335662d3216dSReshma Pattan int ret = rte_latencystats_init(1, NULL); 335762d3216dSReshma Pattan if (ret) 335862d3216dSReshma Pattan printf("Warning: latencystats init()" 335962d3216dSReshma Pattan " returned error %d\n", ret); 336062d3216dSReshma Pattan printf("Latencystats running on lcore %d\n", 336162d3216dSReshma Pattan latencystats_lcore_id); 336262d3216dSReshma Pattan } 336362d3216dSReshma Pattan #endif 336462d3216dSReshma Pattan 33657e4441c8SRemy Horton /* Setup bitrate stats */ 33667e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE 3367e25e6c70SRemy Horton if (bitrate_enabled != 0) { 33687e4441c8SRemy Horton bitrate_data = rte_stats_bitrate_create(); 33697e4441c8SRemy Horton if (bitrate_data == NULL) 3370e25e6c70SRemy Horton rte_exit(EXIT_FAILURE, 3371e25e6c70SRemy Horton "Could not allocate bitrate 
data.\n"); 33727e4441c8SRemy Horton rte_stats_bitrate_reg(bitrate_data); 3373e25e6c70SRemy Horton } 33747e4441c8SRemy Horton #endif 33757e4441c8SRemy Horton 33760d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE 337781ef862bSAllain Legacy if (strlen(cmdline_filename) != 0) 337881ef862bSAllain Legacy cmdline_read_from_file(cmdline_filename); 337981ef862bSAllain Legacy 3380ca7feb22SCyril Chemparathy if (interactive == 1) { 3381ca7feb22SCyril Chemparathy if (auto_start) { 3382ca7feb22SCyril Chemparathy printf("Start automatic packet forwarding\n"); 3383ca7feb22SCyril Chemparathy start_packet_forwarding(0); 3384ca7feb22SCyril Chemparathy } 3385af75078fSIntel prompt(); 33860de738cfSJiayu Hu pmd_test_exit(); 3387ca7feb22SCyril Chemparathy } else 33880d56cb81SThomas Monjalon #endif 33890d56cb81SThomas Monjalon { 3390af75078fSIntel char c; 3391af75078fSIntel int rc; 3392af75078fSIntel 3393d9a191a0SPhil Yang f_quit = 0; 3394d9a191a0SPhil Yang 3395af75078fSIntel printf("No commandline core given, start packet forwarding\n"); 339699cabef0SPablo de Lara start_packet_forwarding(tx_first); 3397cfea1f30SPablo de Lara if (stats_period != 0) { 3398cfea1f30SPablo de Lara uint64_t prev_time = 0, cur_time, diff_time = 0; 3399cfea1f30SPablo de Lara uint64_t timer_period; 3400cfea1f30SPablo de Lara 3401cfea1f30SPablo de Lara /* Convert to number of cycles */ 3402cfea1f30SPablo de Lara timer_period = stats_period * rte_get_timer_hz(); 3403cfea1f30SPablo de Lara 3404d9a191a0SPhil Yang while (f_quit == 0) { 3405cfea1f30SPablo de Lara cur_time = rte_get_timer_cycles(); 3406cfea1f30SPablo de Lara diff_time += cur_time - prev_time; 3407cfea1f30SPablo de Lara 3408cfea1f30SPablo de Lara if (diff_time >= timer_period) { 3409cfea1f30SPablo de Lara print_stats(); 3410cfea1f30SPablo de Lara /* Reset the timer */ 3411cfea1f30SPablo de Lara diff_time = 0; 3412cfea1f30SPablo de Lara } 3413cfea1f30SPablo de Lara /* Sleep to avoid unnecessary checks */ 3414cfea1f30SPablo de Lara prev_time = cur_time; 
3415cfea1f30SPablo de Lara sleep(1); 3416cfea1f30SPablo de Lara } 3417cfea1f30SPablo de Lara } 3418cfea1f30SPablo de Lara 3419af75078fSIntel printf("Press enter to exit\n"); 3420af75078fSIntel rc = read(0, &c, 1); 3421d3a274ceSZhihong Wang pmd_test_exit(); 3422af75078fSIntel if (rc < 0) 3423af75078fSIntel return 1; 3424af75078fSIntel } 3425af75078fSIntel 3426af75078fSIntel return 0; 3427af75078fSIntel } 3428