xref: /dpdk/app/test-pmd/testpmd.c (revision bc70e55948380ce57cbc079930f217c73ea59b39)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
12761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
131c036b16SEelco Chaudron #include <sys/mman.h>
14761f7ae1SJie Zhou #endif
15af75078fSIntel #include <sys/types.h>
16af75078fSIntel #include <errno.h>
17fb73e096SJeff Guo #include <stdbool.h>
18af75078fSIntel 
19af75078fSIntel #include <sys/queue.h>
20af75078fSIntel #include <sys/stat.h>
21af75078fSIntel 
22af75078fSIntel #include <stdint.h>
23af75078fSIntel #include <unistd.h>
24af75078fSIntel #include <inttypes.h>
25af75078fSIntel 
26af75078fSIntel #include <rte_common.h>
27d1eb542eSOlivier Matz #include <rte_errno.h>
28af75078fSIntel #include <rte_byteorder.h>
29af75078fSIntel #include <rte_log.h>
30af75078fSIntel #include <rte_debug.h>
31af75078fSIntel #include <rte_cycles.h>
32af75078fSIntel #include <rte_memory.h>
33af75078fSIntel #include <rte_memcpy.h>
34af75078fSIntel #include <rte_launch.h>
35af75078fSIntel #include <rte_eal.h>
36284c908cSGaetan Rivet #include <rte_alarm.h>
37af75078fSIntel #include <rte_per_lcore.h>
38af75078fSIntel #include <rte_lcore.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50a8d0d473SBruce Richardson #ifdef RTE_NET_IXGBE
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
57bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
587e4441c8SRemy Horton #include <rte_metrics.h>
59bb9be9a4SDavid Marchand #endif
60a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
617e4441c8SRemy Horton #include <rte_bitrate.h>
627e4441c8SRemy Horton #endif
63a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
6462d3216dSReshma Pattan #include <rte_latencystats.h>
6562d3216dSReshma Pattan #endif
66761f7ae1SJie Zhou #ifdef RTE_EXEC_ENV_WINDOWS
67761f7ae1SJie Zhou #include <process.h>
68761f7ae1SJie Zhou #endif
69e46372d7SHuisong Li #ifdef RTE_NET_BOND
70e46372d7SHuisong Li #include <rte_eth_bond.h>
71e46372d7SHuisong Li #endif
72af75078fSIntel 
73af75078fSIntel #include "testpmd.h"
74af75078fSIntel 
75c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
76c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
77c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
78c7f5dba7SAnatoly Burakov #else
79c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
80c7f5dba7SAnatoly Burakov #endif
81c7f5dba7SAnatoly Burakov 
82c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
83c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
84c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
85c7f5dba7SAnatoly Burakov #else
86c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
87c7f5dba7SAnatoly Burakov #endif
88c7f5dba7SAnatoly Burakov 
89c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
9013b19642SDmitry Kozlyuk /*
9113b19642SDmitry Kozlyuk  * Zone size with the malloc overhead (max of debug and release variants)
9213b19642SDmitry Kozlyuk  * must fit into the smallest supported hugepage size (2M),
9313b19642SDmitry Kozlyuk  * so that an IOVA-contiguous zone of this size can always be allocated
9413b19642SDmitry Kozlyuk  * if there are free 2M hugepages.
9513b19642SDmitry Kozlyuk  */
9613b19642SDmitry Kozlyuk #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
97c7f5dba7SAnatoly Burakov 
98af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
99285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
100af75078fSIntel 
101cb056611SStephen Hemminger /* use main core for command line ? */
102af75078fSIntel uint8_t interactive = 0;
103ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
10499cabef0SPablo de Lara uint8_t tx_first;
10581ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
106af75078fSIntel 
107af75078fSIntel /*
108af75078fSIntel  * NUMA support configuration.
109af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
110af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
111af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
112af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
113af75078fSIntel  */
114999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
115af75078fSIntel 
116af75078fSIntel /*
117b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
118b6ea6408SIntel  * not configured.
119b6ea6408SIntel  */
120b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
121b6ea6408SIntel 
122b6ea6408SIntel /*
123c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
124c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
125c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
126c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
127c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
128148f963fSBruce Richardson  */
129c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
130148f963fSBruce Richardson 
131148f963fSBruce Richardson /*
13263531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
13363531389SGeorgios Katsikas  * is allocated.
13463531389SGeorgios Katsikas  */
13563531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
13663531389SGeorgios Katsikas 
13763531389SGeorgios Katsikas /*
13863531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
13963531389SGeorgios Katsikas  * is allocated.
14063531389SGeorgios Katsikas  */
14163531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
14263531389SGeorgios Katsikas 
14363531389SGeorgios Katsikas /*
14463531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
14563531389SGeorgios Katsikas  * is allocated.
14663531389SGeorgios Katsikas  */
14763531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
14863531389SGeorgios Katsikas 
14963531389SGeorgios Katsikas /*
150af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
151af75078fSIntel  * forwarded.
152547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
153af75078fSIntel  * ports.
154af75078fSIntel  */
1556d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
156af75078fSIntel portid_t nb_peer_eth_addrs = 0;
157af75078fSIntel 
158af75078fSIntel /*
159af75078fSIntel  * Probed Target Environment.
160af75078fSIntel  */
161af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
162af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
163af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
164af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
165af75078fSIntel 
1664918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1674918a357SXiaoyun Li 
168af75078fSIntel /*
169af75078fSIntel  * Test Forwarding Configuration.
170af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
171af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
172af75078fSIntel  */
173af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
174af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
175af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
176af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
177af75078fSIntel 
178af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
179af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
180af75078fSIntel 
181af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
182af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
183af75078fSIntel 
/*
 * Forwarding engines.
 *
 * NULL-terminated table of all packet forwarding engines known to testpmd;
 * cur_fwd_eng (below) points at the active entry, io mode by default.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	/* Only compiled in when IEEE 1588 (PTP) support is enabled. */
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL, /* sentinel marking the end of the table */
};
204af75078fSIntel 
20526cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
20659fcf854SShahaf Shuler uint16_t mempool_flags;
207401b744dSShahaf Shuler 
208af75078fSIntel struct fwd_config cur_fwd_config;
209af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
210bf56fce1SZhihong Wang uint32_t retry_enabled;
211bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
212bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
213af75078fSIntel 
21426cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
21526cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
21626cbb419SViacheslav Ovsiienko 	DEFAULT_MBUF_DATA_SIZE
21726cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */
218c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
219c8798818SIntel                                       * specified on command-line. */
220cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
221d9a191a0SPhil Yang 
22263b72657SIvan Ilchenko /** Extended statistics to show. */
22363b72657SIvan Ilchenko struct rte_eth_xstat_name *xstats_display;
22463b72657SIvan Ilchenko 
22563b72657SIvan Ilchenko unsigned int xstats_display_num; /**< Size of extended statistics to show */
22663b72657SIvan Ilchenko 
227d9a191a0SPhil Yang /*
228d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
229d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
230d9a191a0SPhil Yang  */
231d9a191a0SPhil Yang uint8_t f_quit;
232d9a191a0SPhil Yang 
233af75078fSIntel /*
2341bb4a528SFerruh Yigit  * Max Rx frame size, set by '--max-pkt-len' parameter.
2351bb4a528SFerruh Yigit  */
2361bb4a528SFerruh Yigit uint32_t max_rx_pkt_len;
2371bb4a528SFerruh Yigit 
2381bb4a528SFerruh Yigit /*
2390f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2400f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2410f2096d7SViacheslav Ovsiienko  */
2420f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2430f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
24491c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
24591c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2460f2096d7SViacheslav Ovsiienko 
2470f2096d7SViacheslav Ovsiienko /*
248af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
249af75078fSIntel  */
250af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
251af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
252af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
253af75078fSIntel };
254af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
255af75078fSIntel 
25679bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
25779bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
25879bec05bSKonstantin Ananyev 
25982010ef5SYongseok Koh uint8_t txonly_multi_flow;
26082010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
26182010ef5SYongseok Koh 
2624940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2634940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2644940344dSViacheslav Ovsiienko 
2654940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2664940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2674940344dSViacheslav Ovsiienko 
268af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
2696c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
270861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
271e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
272af75078fSIntel 
273900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
274900550deSIntel uint8_t dcb_config = 0;
275900550deSIntel 
276af75078fSIntel /*
277af75078fSIntel  * Configurable number of RX/TX queues.
278af75078fSIntel  */
2791c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
280af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
281af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
282af75078fSIntel 
283af75078fSIntel /*
284af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2858599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
286af75078fSIntel  */
2878599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2888599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
289af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
290af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
291af75078fSIntel 
292f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
293af75078fSIntel /*
294af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
295af75078fSIntel  */
296af75078fSIntel 
297f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
298f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
299f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
300af75078fSIntel 
301f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
302f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
303f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
304af75078fSIntel 
305af75078fSIntel /*
306af75078fSIntel  * Configurable value of RX free threshold.
307af75078fSIntel  */
308f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
309af75078fSIntel 
310af75078fSIntel /*
311ce8d5614SIntel  * Configurable value of RX drop enable.
312ce8d5614SIntel  */
313f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
314ce8d5614SIntel 
315ce8d5614SIntel /*
316af75078fSIntel  * Configurable value of TX free threshold.
317af75078fSIntel  */
318f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
319af75078fSIntel 
320af75078fSIntel /*
321af75078fSIntel  * Configurable value of TX RS bit threshold.
322af75078fSIntel  */
323f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
324af75078fSIntel 
325af75078fSIntel /*
3263c156061SJens Freimann  * Configurable value of buffered packets before sending.
3273c156061SJens Freimann  */
3283c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3293c156061SJens Freimann 
3303c156061SJens Freimann /*
3313c156061SJens Freimann  * Configurable value of packet buffer timeout.
3323c156061SJens Freimann  */
3333c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3343c156061SJens Freimann 
3353c156061SJens Freimann /*
3363c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3373c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3383c156061SJens Freimann  */
3393c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3403c156061SJens Freimann 
3413c156061SJens Freimann /*
3423c156061SJens Freimann  * Configurable value of number of random writes done in
3433c156061SJens Freimann  * VNF simulation memory area.
3443c156061SJens Freimann  */
3453c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3463c156061SJens Freimann 
3473c156061SJens Freimann /*
3483c156061SJens Freimann  * Configurable value of number of random reads done in
3493c156061SJens Freimann  * VNF simulation memory area.
3503c156061SJens Freimann  */
3513c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3523c156061SJens Freimann 
3533c156061SJens Freimann /*
3543c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3553c156061SJens Freimann  * VNF simulation memory area.
3563c156061SJens Freimann  */
3573c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3583c156061SJens Freimann 
3593c156061SJens Freimann /*
360af75078fSIntel  * Receive Side Scaling (RSS) configuration.
361af75078fSIntel  */
362295968d1SFerruh Yigit uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
363af75078fSIntel 
364af75078fSIntel /*
365af75078fSIntel  * Port topology configuration
366af75078fSIntel  */
367af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
368af75078fSIntel 
3697741e4cfSIntel /*
3707741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3717741e4cfSIntel  */
3727741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3737741e4cfSIntel 
374af75078fSIntel /*
3757ee3e944SVasily Philipov  * Flow API isolated mode.
3767ee3e944SVasily Philipov  */
3777ee3e944SVasily Philipov uint8_t flow_isolate_all;
3787ee3e944SVasily Philipov 
3797ee3e944SVasily Philipov /*
380bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
381bc202406SDavid Marchand  */
382bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
383bc202406SDavid Marchand 
384bc202406SDavid Marchand /*
3856937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3866937d210SStephen Hemminger  */
3876937d210SStephen Hemminger uint8_t no_device_start = 0;
3886937d210SStephen Hemminger 
3896937d210SStephen Hemminger /*
3908ea656f8SGaetan Rivet  * Enable link status change notification
3918ea656f8SGaetan Rivet  */
3928ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3938ea656f8SGaetan Rivet 
3948ea656f8SGaetan Rivet /*
395284c908cSGaetan Rivet  * Enable device removal notification.
396284c908cSGaetan Rivet  */
397284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
398284c908cSGaetan Rivet 
399fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
400fb73e096SJeff Guo 
4014f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
4024f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
4034f1ed78eSThomas Monjalon 
404b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
405b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
406b0a9354aSPavan Nikhilesh 
40701817b10SBing Zhao /* Hairpin ports configuration mode. */
40801817b10SBing Zhao uint16_t hairpin_mode;
40901817b10SBing Zhao 
/*
 * Pretty printing of ethdev events: human-readable names indexed by
 * enum rte_eth_event_type. The RTE_ETH_EVENT_MAX slot is a NULL sentinel.
 */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_MAX] = NULL,
};
42697b5d8b5SThomas Monjalon 
/*
 * Display or mask ether events.
 * Bit n of the mask enables printing of event type n.
 * Default enables the classic events except VF_MBOX; note that NEW,
 * DESTROY and RX_AVAIL_THRESH are also not displayed by default.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
439e505d84cSAnatoly Burakov /*
440e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
441e505d84cSAnatoly Burakov  */
442e505d84cSAnatoly Burakov int do_mlockall = 0;
4433af72783SGaetan Rivet 
4443af72783SGaetan Rivet /*
4457b7e5ba7SIntel  * NIC bypass mode configuration options.
4467b7e5ba7SIntel  */
4477b7e5ba7SIntel 
448a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
4497b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
450e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4517b7e5ba7SIntel #endif
4527b7e5ba7SIntel 
453e261265eSRadu Nicolau 
454a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
45562d3216dSReshma Pattan 
45662d3216dSReshma Pattan /*
45762d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
45862d3216dSReshma Pattan  */
45962d3216dSReshma Pattan uint8_t latencystats_enabled;
46062d3216dSReshma Pattan 
46162d3216dSReshma Pattan /*
4627be78d02SJosh Soref  * Lcore ID to service latency statistics.
46362d3216dSReshma Pattan  */
46462d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
46562d3216dSReshma Pattan 
46662d3216dSReshma Pattan #endif
46762d3216dSReshma Pattan 
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode; /* zero-initialized (static storage) */

struct rte_eth_txmode tx_mode = {
	/* Request fast release of mbufs back to their original pool on Tx. */
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};
476fd8c20aaSShahaf Shuler 
/* Default Flow Director configuration: disabled, with full match masks. */
struct rte_eth_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE, /* Flow Director disabled by default */
	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		/* NOTE(review): bit 0x0010 of the VLAN TCI is excluded from
		 * matching here -- confirm this is intentional. */
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
499af75078fSIntel 
5002950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
501af75078fSIntel 
502a4fd5eeeSElza Mathew /*
503a4fd5eeeSElza Mathew  * Display zero values by default for xstats
504a4fd5eeeSElza Mathew  */
505a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
506a4fd5eeeSElza Mathew 
507bc700b67SDharmik Thakkar /*
508bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
509bc700b67SDharmik Thakkar  */
510bc700b67SDharmik Thakkar uint8_t record_core_cycles;
511bc700b67SDharmik Thakkar 
5120e4b1963SDharmik Thakkar /*
5130e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
5140e4b1963SDharmik Thakkar  */
5150e4b1963SDharmik Thakkar uint8_t record_burst_stats;
5160e4b1963SDharmik Thakkar 
517f4d178c1SXueming Li /*
518f4d178c1SXueming Li  * Number of ports per shared Rx queue group, 0 disable.
519f4d178c1SXueming Li  */
520f4d178c1SXueming Li uint32_t rxq_share;
521f4d178c1SXueming Li 
522c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
523c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
5247acf894dSStephen Hurd 
525a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
5267e4441c8SRemy Horton /* Bitrate statistics */
5277e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
528e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
529e25e6c70SRemy Horton uint8_t bitrate_enabled;
530e25e6c70SRemy Horton #endif
5317e4441c8SRemy Horton 
5326970401eSDavid Marchand #ifdef RTE_LIB_GRO
533b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
534b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
5356970401eSDavid Marchand #endif
536b40f8d78SJiayu Hu 
537f9295aa2SXiaoyu Min /*
538f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
539f9295aa2SXiaoyu Min  */
540295968d1SFerruh Yigit enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
541f9295aa2SXiaoyu Min 
542b7b78a08SAjit Khaparde /*
543b7b78a08SAjit Khaparde  * Used to set forced link speed
544b7b78a08SAjit Khaparde  */
545b7b78a08SAjit Khaparde uint32_t eth_link_speed;
546b7b78a08SAjit Khaparde 
547a550baf2SMin Hu (Connor) /*
548a550baf2SMin Hu (Connor)  * ID of the current process in multi-process, used to
549a550baf2SMin Hu (Connor)  * configure the queues to be polled.
550a550baf2SMin Hu (Connor)  */
551a550baf2SMin Hu (Connor) int proc_id;
552a550baf2SMin Hu (Connor) 
553a550baf2SMin Hu (Connor) /*
554a550baf2SMin Hu (Connor)  * Number of processes in multi-process, used to
555a550baf2SMin Hu (Connor)  * configure the queues to be polled.
556a550baf2SMin Hu (Connor)  */
557a550baf2SMin Hu (Connor) unsigned int num_procs = 1;
558a550baf2SMin Hu (Connor) 
559f6d8a6d3SIvan Malov static void
560f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(uint16_t port_id)
561f6d8a6d3SIvan Malov {
562f6d8a6d3SIvan Malov 	uint64_t rx_meta_features = 0;
563f6d8a6d3SIvan Malov 	int ret;
564f6d8a6d3SIvan Malov 
565f6d8a6d3SIvan Malov 	if (!is_proc_primary())
566f6d8a6d3SIvan Malov 		return;
567f6d8a6d3SIvan Malov 
568f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
569f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
570f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
571f6d8a6d3SIvan Malov 
572f6d8a6d3SIvan Malov 	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
573f6d8a6d3SIvan Malov 	if (ret == 0) {
574f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
575f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
576f6d8a6d3SIvan Malov 				    port_id);
577f6d8a6d3SIvan Malov 		}
578f6d8a6d3SIvan Malov 
579f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
580f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
581f6d8a6d3SIvan Malov 				    port_id);
582f6d8a6d3SIvan Malov 		}
583f6d8a6d3SIvan Malov 
584f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
585f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
586f6d8a6d3SIvan Malov 				    port_id);
587f6d8a6d3SIvan Malov 		}
588f6d8a6d3SIvan Malov 	} else if (ret != -ENOTSUP) {
589f6d8a6d3SIvan Malov 		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
590f6d8a6d3SIvan Malov 			 port_id, rte_strerror(-ret));
591f6d8a6d3SIvan Malov 	}
592f6d8a6d3SIvan Malov }
593f6d8a6d3SIvan Malov 
/*
 * Configure an ethdev from the primary process only; a secondary process
 * reports success without touching the device.
 */
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (!is_proc_primary())
		return 0;

	return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, dev_conf);
}
603a550baf2SMin Hu (Connor) 
604a550baf2SMin Hu (Connor) static int
605e46372d7SHuisong Li change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
606e46372d7SHuisong Li {
607e46372d7SHuisong Li #ifdef RTE_NET_BOND
608e46372d7SHuisong Li 
609e46372d7SHuisong Li 	portid_t slave_pids[RTE_MAX_ETHPORTS];
610e46372d7SHuisong Li 	struct rte_port *port;
611e46372d7SHuisong Li 	int num_slaves;
612e46372d7SHuisong Li 	portid_t slave_pid;
613e46372d7SHuisong Li 	int i;
614e46372d7SHuisong Li 
615e46372d7SHuisong Li 	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
616e46372d7SHuisong Li 						RTE_MAX_ETHPORTS);
617e46372d7SHuisong Li 	if (num_slaves < 0) {
618e46372d7SHuisong Li 		fprintf(stderr, "Failed to get slave list for port = %u\n",
619e46372d7SHuisong Li 			bond_pid);
620e46372d7SHuisong Li 		return num_slaves;
621e46372d7SHuisong Li 	}
622e46372d7SHuisong Li 
623e46372d7SHuisong Li 	for (i = 0; i < num_slaves; i++) {
624e46372d7SHuisong Li 		slave_pid = slave_pids[i];
625e46372d7SHuisong Li 		port = &ports[slave_pid];
626e46372d7SHuisong Li 		port->port_status =
627e46372d7SHuisong Li 			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
628e46372d7SHuisong Li 	}
629e46372d7SHuisong Li #else
630e46372d7SHuisong Li 	RTE_SET_USED(bond_pid);
631e46372d7SHuisong Li 	RTE_SET_USED(is_stop);
632e46372d7SHuisong Li #endif
633e46372d7SHuisong Li 	return 0;
634e46372d7SHuisong Li }
635e46372d7SHuisong Li 
636e46372d7SHuisong Li static int
637a550baf2SMin Hu (Connor) eth_dev_start_mp(uint16_t port_id)
638a550baf2SMin Hu (Connor) {
639e46372d7SHuisong Li 	int ret;
640e46372d7SHuisong Li 
641e46372d7SHuisong Li 	if (is_proc_primary()) {
642e46372d7SHuisong Li 		ret = rte_eth_dev_start(port_id);
643e46372d7SHuisong Li 		if (ret != 0)
644e46372d7SHuisong Li 			return ret;
645e46372d7SHuisong Li 
646e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
647e46372d7SHuisong Li 
648e46372d7SHuisong Li 		/*
649e46372d7SHuisong Li 		 * Starting a bonded port also starts all slaves under the bonded
650e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
651e46372d7SHuisong Li 		 * port status of these slaves.
652e46372d7SHuisong Li 		 */
653e46372d7SHuisong Li 		if (port->bond_flag == 1)
654e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, false);
655e46372d7SHuisong Li 	}
656a550baf2SMin Hu (Connor) 
657a550baf2SMin Hu (Connor) 	return 0;
658a550baf2SMin Hu (Connor) }
659a550baf2SMin Hu (Connor) 
660a550baf2SMin Hu (Connor) static int
661a550baf2SMin Hu (Connor) eth_dev_stop_mp(uint16_t port_id)
662a550baf2SMin Hu (Connor) {
663e46372d7SHuisong Li 	int ret;
664e46372d7SHuisong Li 
665e46372d7SHuisong Li 	if (is_proc_primary()) {
666e46372d7SHuisong Li 		ret = rte_eth_dev_stop(port_id);
667e46372d7SHuisong Li 		if (ret != 0)
668e46372d7SHuisong Li 			return ret;
669e46372d7SHuisong Li 
670e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
671e46372d7SHuisong Li 
672e46372d7SHuisong Li 		/*
673e46372d7SHuisong Li 		 * Stopping a bonded port also stops all slaves under the bonded
674e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
675e46372d7SHuisong Li 		 * port status of these slaves.
676e46372d7SHuisong Li 		 */
677e46372d7SHuisong Li 		if (port->bond_flag == 1)
678e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, true);
679e46372d7SHuisong Li 	}
680a550baf2SMin Hu (Connor) 
681a550baf2SMin Hu (Connor) 	return 0;
682a550baf2SMin Hu (Connor) }
683a550baf2SMin Hu (Connor) 
/*
 * Multi-process aware wrapper for rte_mempool_free(): the pool is owned by
 * the primary process, so secondary processes must not free it.
 */
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (!is_proc_primary())
		return;

	rte_mempool_free(mp);
}
690a550baf2SMin Hu (Connor) 
/*
 * Multi-process aware wrapper for rte_eth_dev_set_mtu(): only the primary
 * process changes the device MTU; secondary processes report success
 * without touching the hardware.
 */
static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (!is_proc_primary())
		return 0;

	return rte_eth_dev_set_mtu(port_id, mtu);
}
699a550baf2SMin Hu (Connor) 
700ed30d9b6SIntel /* Forward function declarations */
701c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
702edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
703f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
70476ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
705d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
706cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
707fb73e096SJeff Guo 				enum rte_dev_event_type type,
708fb73e096SJeff Guo 				void *param);
70963b72657SIvan Ilchenko static void fill_xstats_display_info(void);
710ce8d5614SIntel 
711ce8d5614SIntel /*
712ce8d5614SIntel  * Check if all the ports are started.
713ce8d5614SIntel  * If yes, return positive value. If not, return zero.
714ce8d5614SIntel  */
715ce8d5614SIntel static int all_ports_started(void);
716ed30d9b6SIntel 
#ifdef RTE_LIB_GSO
/* Per-port GSO (Generic Segmentation Offload) engine state. */
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
/* Upper bound for a GSO output segment: max Ethernet frame minus CRC. */
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
72563b72657SIvan Ilchenko 
726af75078fSIntel /*
72798a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
728c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
729c9cafcc8SShahaf Shuler  */
730c9cafcc8SShahaf Shuler int
731c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
732c9cafcc8SShahaf Shuler {
733c9cafcc8SShahaf Shuler 	unsigned int i;
734c9cafcc8SShahaf Shuler 
735c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
736c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
737c9cafcc8SShahaf Shuler 			return 0;
738c9cafcc8SShahaf Shuler 	}
739c9cafcc8SShahaf Shuler 	return 1;
740c9cafcc8SShahaf Shuler }
741c9cafcc8SShahaf Shuler 
742c9cafcc8SShahaf Shuler /*
743af75078fSIntel  * Setup default configuration.
744af75078fSIntel  */
745af75078fSIntel static void
746af75078fSIntel set_default_fwd_lcores_config(void)
747af75078fSIntel {
748af75078fSIntel 	unsigned int i;
749af75078fSIntel 	unsigned int nb_lc;
7507acf894dSStephen Hurd 	unsigned int sock_num;
751af75078fSIntel 
752af75078fSIntel 	nb_lc = 0;
753af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
754dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
755dbfb8ec7SPhil Yang 			continue;
756c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
757c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
758c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
759c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
760c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
761c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
762c9cafcc8SShahaf Shuler 			}
763c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
7647acf894dSStephen Hurd 		}
765cb056611SStephen Hemminger 		if (i == rte_get_main_lcore())
766f54fe5eeSStephen Hurd 			continue;
767f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
768af75078fSIntel 	}
769af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
770af75078fSIntel 	nb_cfg_lcores = nb_lcores;
771af75078fSIntel 	nb_fwd_lcores = 1;
772af75078fSIntel }
773af75078fSIntel 
774af75078fSIntel static void
775af75078fSIntel set_def_peer_eth_addrs(void)
776af75078fSIntel {
777af75078fSIntel 	portid_t i;
778af75078fSIntel 
779af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
78035b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
781af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
782af75078fSIntel 	}
783af75078fSIntel }
784af75078fSIntel 
785af75078fSIntel static void
786af75078fSIntel set_default_fwd_ports_config(void)
787af75078fSIntel {
788af75078fSIntel 	portid_t pt_id;
78965a7360cSMatan Azrad 	int i = 0;
790af75078fSIntel 
791effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
79265a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
793af75078fSIntel 
794effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
795effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
796effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
797effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
798effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
799effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
800effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
801effdb8bbSPhil Yang 			}
802effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
803effdb8bbSPhil Yang 		}
804effdb8bbSPhil Yang 	}
805effdb8bbSPhil Yang 
806af75078fSIntel 	nb_cfg_ports = nb_ports;
807af75078fSIntel 	nb_fwd_ports = nb_ports;
808af75078fSIntel }
809af75078fSIntel 
/*
 * Install the default forwarding configuration. The lcore setup must run
 * first because it discovers the NUMA sockets that the port setup relies on.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
817af75078fSIntel 
818761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
819c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
820c7f5dba7SAnatoly Burakov static int
821c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
822c7f5dba7SAnatoly Burakov {
823c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
824c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
825c7f5dba7SAnatoly Burakov 
826c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
827c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
828c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
829c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
830c7f5dba7SAnatoly Burakov 	 */
831c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
834c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
835c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
836c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
837c7f5dba7SAnatoly Burakov 		return -1;
838c7f5dba7SAnatoly Burakov 	}
839c7f5dba7SAnatoly Burakov 
840c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
841c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
842c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
843c7f5dba7SAnatoly Burakov 
844c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
845c7f5dba7SAnatoly Burakov 
846c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
847c7f5dba7SAnatoly Burakov 
848c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
849c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
850c7f5dba7SAnatoly Burakov 		return -1;
851c7f5dba7SAnatoly Burakov 	}
852c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
853c7f5dba7SAnatoly Burakov 
854c7f5dba7SAnatoly Burakov 	return 0;
855c7f5dba7SAnatoly Burakov }
856c7f5dba7SAnatoly Burakov 
857c7f5dba7SAnatoly Burakov static int
858c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
859c7f5dba7SAnatoly Burakov {
860c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
861c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
862c7f5dba7SAnatoly Burakov 	 */
8639d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
864c7f5dba7SAnatoly Burakov 
865c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
866c7f5dba7SAnatoly Burakov }
867c7f5dba7SAnatoly Burakov 
868c7f5dba7SAnatoly Burakov static void *
869c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
870c7f5dba7SAnatoly Burakov {
871c7f5dba7SAnatoly Burakov 	void *addr;
872c7f5dba7SAnatoly Burakov 	int flags;
873c7f5dba7SAnatoly Burakov 
874c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
875c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
876c7f5dba7SAnatoly Burakov 	if (huge)
877c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
878c7f5dba7SAnatoly Burakov 
879c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
880c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
881c7f5dba7SAnatoly Burakov 		return NULL;
882c7f5dba7SAnatoly Burakov 
883c7f5dba7SAnatoly Burakov 	return addr;
884c7f5dba7SAnatoly Burakov }
885c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area backing a mempool. */
struct extmem_param {
	void *addr;		/* base virtual address of the area */
	size_t len;		/* total length of the area in bytes */
	size_t pgsz;		/* page size the area was allocated with */
	rte_iova_t *iova_table;	/* IOVA of each page (heap-allocated) */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
893c7f5dba7SAnatoly Burakov 
/*
 * Allocate an anonymous memory area large enough for nb_mbufs objects of
 * mbuf_sz bytes and collect the IOVA of every page in it. Page sizes are
 * tried in the order listed in pgsizes[] until one succeeds; when huge is
 * false, the system page size is used instead. On success the area's
 * description is written to *param (the caller owns param->iova_table and
 * must free() it). Returns 0 on success, -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* a page size worked: stop trying alternatives */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* iovas may be NULL here; free(NULL) is a no-op */
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
983c7f5dba7SAnatoly Burakov 
/*
 * Create (if it does not exist yet) the testpmd external-memory malloc
 * heap and populate it with an anonymous memory area big enough for
 * nb_mbufs objects of mbuf_sz bytes, optionally using huge pages.
 * Returns 0 on success, -1 on failure.
 */
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
/*
 * rte_mempool_mem_iter() callback: DMA-unmap one mempool memory chunk from
 * every ethdev port's device and un-register it from DPDK's external memory
 * tracking. Failures are only logged and iteration continues (best effort).
 */
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	/* drop the chunk from DPDK's external memory registry */
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}
10653a0968c8SShahaf Shuler 
/*
 * rte_mempool_mem_iter() callback: register one mempool memory chunk as
 * external memory and DMA-map it for every ethdev port's device. If the
 * registration fails the chunk is skipped entirely; per-port mapping
 * failures are only logged and iteration continues (best effort).
 */
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p,"
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
1101761f7ae1SJie Zhou #endif
1102c7f5dba7SAnatoly Burakov 
/*
 * Reserve IOVA-contiguous memzones to serve as pinned external buffers for
 * a mempool and fill a descriptor array for rte_pktmbuf_pool_create_extbuf().
 *
 * On success returns the number of descriptors and stores the malloc'd
 * array (owned by the caller) in *ext_mem. On failure returns 0, sets
 * *ext_mem to NULL and sets errno (ENAMETOOLONG or ENOMEM). Memzones
 * reserved before a failure are intentionally not freed: the caller exits
 * on error.
 */
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	/* how many cache-line-aligned elements fit per zone, rounded up */
	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	/* ext_num == 0 signals failure above: release the descriptor array */
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
116172512e18SViacheslav Ovsiienko 
/*
 * Create the mbuf pool for one socket/segment-size combination; done once
 * at init time. In a secondary process the pool is looked up by name
 * instead of created. The allocation strategy is selected by the global
 * mp_alloc_type (native, anonymous, external memory, pinned ext-buffers).
 * Exits the application on any failure; never returns NULL.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	/* secondary processes attach to the pool created by the primary */
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				"Get mbuf pool for socket %u failed: %s\n",
				socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
		{
			/* empty pool populated from anonymous mapped memory */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			/* DMA-map every chunk for all ports */
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
#endif
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
1280af75078fSIntel 
128120a0286fSLiu Xiaofeng /*
128220a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
128320a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
128420a0286fSLiu Xiaofeng  */
128520a0286fSLiu Xiaofeng static int
128620a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
128720a0286fSLiu Xiaofeng {
128820a0286fSLiu Xiaofeng 	static int warning_once = 0;
128920a0286fSLiu Xiaofeng 
1290c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
129120a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
129261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
129361a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
129420a0286fSLiu Xiaofeng 		warning_once = 1;
129520a0286fSLiu Xiaofeng 		return -1;
129620a0286fSLiu Xiaofeng 	}
129720a0286fSLiu Xiaofeng 	return 0;
129820a0286fSLiu Xiaofeng }
129920a0286fSLiu Xiaofeng 
13003f7311baSWei Dai /*
13013f7311baSWei Dai  * Get the allowed maximum number of RX queues.
13023f7311baSWei Dai  * *pid return the port id which has minimal value of
13033f7311baSWei Dai  * max_rx_queues in all ports.
13043f7311baSWei Dai  */
13053f7311baSWei Dai queueid_t
13063f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
13073f7311baSWei Dai {
13089e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
13096f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
13103f7311baSWei Dai 	portid_t pi;
13113f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
13123f7311baSWei Dai 
13133f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13146f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13156f51deb9SIvan Ilchenko 			continue;
13166f51deb9SIvan Ilchenko 
13176f51deb9SIvan Ilchenko 		max_rxq_valid = true;
13183f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
13193f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13203f7311baSWei Dai 			*pid = pi;
13213f7311baSWei Dai 		}
13223f7311baSWei Dai 	}
13236f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13243f7311baSWei Dai }
13253f7311baSWei Dai 
13263f7311baSWei Dai /*
13273f7311baSWei Dai  * Check input rxq is valid or not.
13283f7311baSWei Dai  * If input rxq is not greater than any of maximum number
13293f7311baSWei Dai  * of RX queues of all ports, it is valid.
13303f7311baSWei Dai  * if valid, return 0, else return -1
13313f7311baSWei Dai  */
13323f7311baSWei Dai int
13333f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13343f7311baSWei Dai {
13353f7311baSWei Dai 	queueid_t allowed_max_rxq;
13363f7311baSWei Dai 	portid_t pid = 0;
13373f7311baSWei Dai 
13383f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13393f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
134061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
134161a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
134261a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13433f7311baSWei Dai 		return -1;
13443f7311baSWei Dai 	}
13453f7311baSWei Dai 	return 0;
13463f7311baSWei Dai }
13473f7311baSWei Dai 
134836db4f6cSWei Dai /*
134936db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
135036db4f6cSWei Dai  * *pid return the port id which has minimal value of
135136db4f6cSWei Dai  * max_tx_queues in all ports.
135236db4f6cSWei Dai  */
135336db4f6cSWei Dai queueid_t
135436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
135536db4f6cSWei Dai {
13569e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13576f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
135836db4f6cSWei Dai 	portid_t pi;
135936db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
136036db4f6cSWei Dai 
136136db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13626f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13636f51deb9SIvan Ilchenko 			continue;
13646f51deb9SIvan Ilchenko 
13656f51deb9SIvan Ilchenko 		max_txq_valid = true;
136636db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
136736db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
136836db4f6cSWei Dai 			*pid = pi;
136936db4f6cSWei Dai 		}
137036db4f6cSWei Dai 	}
13716f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
137236db4f6cSWei Dai }
137336db4f6cSWei Dai 
137436db4f6cSWei Dai /*
137536db4f6cSWei Dai  * Check input txq is valid or not.
137636db4f6cSWei Dai  * If input txq is not greater than any of maximum number
137736db4f6cSWei Dai  * of TX queues of all ports, it is valid.
137836db4f6cSWei Dai  * if valid, return 0, else return -1
137936db4f6cSWei Dai  */
138036db4f6cSWei Dai int
138136db4f6cSWei Dai check_nb_txq(queueid_t txq)
138236db4f6cSWei Dai {
138336db4f6cSWei Dai 	queueid_t allowed_max_txq;
138436db4f6cSWei Dai 	portid_t pid = 0;
138536db4f6cSWei Dai 
138636db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
138736db4f6cSWei Dai 	if (txq > allowed_max_txq) {
138861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
138961a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
139061a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
139136db4f6cSWei Dai 		return -1;
139236db4f6cSWei Dai 	}
139336db4f6cSWei Dai 	return 0;
139436db4f6cSWei Dai }
139536db4f6cSWei Dai 
13961c69df45SOri Kam /*
139799e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
139899e040d3SLijun Ou  * *pid return the port id which has minimal value of
139999e040d3SLijun Ou  * max_rxd in all queues of all ports.
140099e040d3SLijun Ou  */
140199e040d3SLijun Ou static uint16_t
140299e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
140399e040d3SLijun Ou {
140499e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
140599e040d3SLijun Ou 	portid_t pi;
140699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
140799e040d3SLijun Ou 
140899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
140999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141099e040d3SLijun Ou 			continue;
141199e040d3SLijun Ou 
141299e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
141399e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
141499e040d3SLijun Ou 			*pid = pi;
141599e040d3SLijun Ou 		}
141699e040d3SLijun Ou 	}
141799e040d3SLijun Ou 	return allowed_max_rxd;
141899e040d3SLijun Ou }
141999e040d3SLijun Ou 
142099e040d3SLijun Ou /*
142199e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
142299e040d3SLijun Ou  * *pid return the port id which has minimal value of
142399e040d3SLijun Ou  * min_rxd in all queues of all ports.
142499e040d3SLijun Ou  */
142599e040d3SLijun Ou static uint16_t
142699e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
142799e040d3SLijun Ou {
142899e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
142999e040d3SLijun Ou 	portid_t pi;
143099e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
143199e040d3SLijun Ou 
143299e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
143399e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143499e040d3SLijun Ou 			continue;
143599e040d3SLijun Ou 
143699e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
143799e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
143899e040d3SLijun Ou 			*pid = pi;
143999e040d3SLijun Ou 		}
144099e040d3SLijun Ou 	}
144199e040d3SLijun Ou 
144299e040d3SLijun Ou 	return allowed_min_rxd;
144399e040d3SLijun Ou }
144499e040d3SLijun Ou 
144599e040d3SLijun Ou /*
144699e040d3SLijun Ou  * Check input rxd is valid or not.
144799e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
144899e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
144999e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
145099e040d3SLijun Ou  * if valid, return 0, else return -1
145199e040d3SLijun Ou  */
145299e040d3SLijun Ou int
145399e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
145499e040d3SLijun Ou {
145599e040d3SLijun Ou 	uint16_t allowed_max_rxd;
145699e040d3SLijun Ou 	uint16_t allowed_min_rxd;
145799e040d3SLijun Ou 	portid_t pid = 0;
145899e040d3SLijun Ou 
145999e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
146099e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
146161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
146261a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
146361a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
146499e040d3SLijun Ou 		return -1;
146599e040d3SLijun Ou 	}
146699e040d3SLijun Ou 
146799e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
146899e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
146961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
147061a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
147161a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
147299e040d3SLijun Ou 		return -1;
147399e040d3SLijun Ou 	}
147499e040d3SLijun Ou 
147599e040d3SLijun Ou 	return 0;
147699e040d3SLijun Ou }
147799e040d3SLijun Ou 
147899e040d3SLijun Ou /*
147999e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
148099e040d3SLijun Ou  * *pid return the port id which has minimal value of
148199e040d3SLijun Ou  * max_txd in every tx queue.
148299e040d3SLijun Ou  */
148399e040d3SLijun Ou static uint16_t
148499e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
148599e040d3SLijun Ou {
148699e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
148799e040d3SLijun Ou 	portid_t pi;
148899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
148999e040d3SLijun Ou 
149099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
149199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149299e040d3SLijun Ou 			continue;
149399e040d3SLijun Ou 
149499e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
149599e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
149699e040d3SLijun Ou 			*pid = pi;
149799e040d3SLijun Ou 		}
149899e040d3SLijun Ou 	}
149999e040d3SLijun Ou 	return allowed_max_txd;
150099e040d3SLijun Ou }
150199e040d3SLijun Ou 
150299e040d3SLijun Ou /*
150399e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queues.
150499e040d3SLijun Ou  * *pid return the port id which has minimal value of
150599e040d3SLijun Ou  * min_txd in every tx queue.
150699e040d3SLijun Ou  */
150799e040d3SLijun Ou static uint16_t
150899e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
150999e040d3SLijun Ou {
151099e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
151199e040d3SLijun Ou 	portid_t pi;
151299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
151399e040d3SLijun Ou 
151499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
151599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
151699e040d3SLijun Ou 			continue;
151799e040d3SLijun Ou 
151899e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
151999e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
152099e040d3SLijun Ou 			*pid = pi;
152199e040d3SLijun Ou 		}
152299e040d3SLijun Ou 	}
152399e040d3SLijun Ou 
152499e040d3SLijun Ou 	return allowed_min_txd;
152599e040d3SLijun Ou }
152699e040d3SLijun Ou 
152799e040d3SLijun Ou /*
152899e040d3SLijun Ou  * Check input txd is valid or not.
152999e040d3SLijun Ou  * If input txd is not greater than any of maximum number
153099e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
153199e040d3SLijun Ou  * if valid, return 0, else return -1
153299e040d3SLijun Ou  */
153399e040d3SLijun Ou int
153499e040d3SLijun Ou check_nb_txd(queueid_t txd)
153599e040d3SLijun Ou {
153699e040d3SLijun Ou 	uint16_t allowed_max_txd;
153799e040d3SLijun Ou 	uint16_t allowed_min_txd;
153899e040d3SLijun Ou 	portid_t pid = 0;
153999e040d3SLijun Ou 
154099e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
154199e040d3SLijun Ou 	if (txd > allowed_max_txd) {
154261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
154361a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
154461a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
154599e040d3SLijun Ou 		return -1;
154699e040d3SLijun Ou 	}
154799e040d3SLijun Ou 
154899e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
154999e040d3SLijun Ou 	if (txd < allowed_min_txd) {
155061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
155161a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
155261a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
155399e040d3SLijun Ou 		return -1;
155499e040d3SLijun Ou 	}
155599e040d3SLijun Ou 	return 0;
155699e040d3SLijun Ou }
155799e040d3SLijun Ou 
155899e040d3SLijun Ou 
155999e040d3SLijun Ou /*
15601c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15611c69df45SOri Kam  * *pid return the port id which has minimal value of
15621c69df45SOri Kam  * max_hairpin_queues in all ports.
15631c69df45SOri Kam  */
15641c69df45SOri Kam queueid_t
15651c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15661c69df45SOri Kam {
15679e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15681c69df45SOri Kam 	portid_t pi;
15691c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15701c69df45SOri Kam 
15711c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15721c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15731c69df45SOri Kam 			*pid = pi;
15741c69df45SOri Kam 			return 0;
15751c69df45SOri Kam 		}
15761c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15771c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15781c69df45SOri Kam 			*pid = pi;
15791c69df45SOri Kam 		}
15801c69df45SOri Kam 	}
15811c69df45SOri Kam 	return allowed_max_hairpinq;
15821c69df45SOri Kam }
15831c69df45SOri Kam 
15841c69df45SOri Kam /*
15851c69df45SOri Kam  * Check input hairpin is valid or not.
15861c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
15871c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
15881c69df45SOri Kam  * if valid, return 0, else return -1
15891c69df45SOri Kam  */
15901c69df45SOri Kam int
15911c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15921c69df45SOri Kam {
15931c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15941c69df45SOri Kam 	portid_t pid = 0;
15951c69df45SOri Kam 
15961c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15971c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
159861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
159961a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
16001c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
16011c69df45SOri Kam 		return -1;
16021c69df45SOri Kam 	}
16031c69df45SOri Kam 	return 0;
16041c69df45SOri Kam }
16051c69df45SOri Kam 
16061bb4a528SFerruh Yigit static int
16071bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
16081bb4a528SFerruh Yigit {
16091bb4a528SFerruh Yigit 	uint32_t eth_overhead;
16101bb4a528SFerruh Yigit 
16111bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
16121bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
16131bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
16141bb4a528SFerruh Yigit 	else
16151bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
16161bb4a528SFerruh Yigit 
16171bb4a528SFerruh Yigit 	return eth_overhead;
16181bb4a528SFerruh Yigit }
16191bb4a528SFerruh Yigit 
/*
 * Apply the default testpmd configuration to one port: Rx metadata
 * negotiation, Tx/Rx mode, offload flags for every queue, optional
 * link speed and MTU overrides, and the reconfiguration flags.
 * May also grow mbuf_data_size[0] so one mbuf segment chain can hold
 * a full MTU-sized packet. Exits the application if the device info
 * query fails.
 */
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);

	/* Start from the globally configured Tx/Rx modes. */
	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	/* Drop fast-free from the Tx offloads if the device lacks it. */
	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	/* Translate the legacy max-pkt-len option into an MTU. */
	if (max_rx_pkt_len)
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		uint16_t mtu;

		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			/* Size each segment so nb_mtu_seg_max of them
			 * cover a full frame, plus headroom.
			 */
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					"Configured mbuf size of the first segment %hu\n",
					mbuf_data_size[0]);
			}
		}
	}
}
1683b6b8a1ebSViacheslav Ovsiienko 
/*
 * One-time global initialization of testpmd state: per-lcore forwarding
 * contexts, per-port default configuration, mbuf pools (per socket when
 * NUMA support is on), forwarding configuration, and - when compiled
 * in - the GSO and GRO contexts of every forwarding lcore.
 * Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
#ifdef RTE_LIB_GRO
	struct rte_gro_param gro_param;
#endif
#ifdef RTE_LIB_GSO
	uint32_t gso_types;
#endif

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		uint32_t socket_id;

		if (numa_support) {
			/* Prefer the user-supplied port->socket mapping;
			 * fall back to the device's own socket.
			 */
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			}
		} else {
			socket_id = (socket_num == UMA_NO_CONFIG) ?
				    0 : socket_num;
		}
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		/* One pool per (socket, mbuf segment size) pair. */
		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							  nb_mbuf_per_pool,
							  socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	init_port_config();

#ifdef RTE_LIB_GSO
	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
#endif
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		/* Fall back to the socket-0 pool if the lcore's socket
		 * has none.
		 */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
#ifdef RTE_LIB_GSO
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
#endif
	}

	fwd_config_setup();

#ifdef RTE_LIB_GRO
	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
#endif
}
1822ce8d5614SIntel 
18232950a769SDeclan Doherty 
/*
 * Re-apply the default testpmd configuration to a single port, e.g.
 * after hotplug attach. Resets the port's offloads/modes for the given
 * socket and refreshes the global port configuration.
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
	init_port_config();
}
18312950a769SDeclan Doherty 
/*
 * (Re)allocate the global array of forwarding streams so it holds
 * nb_ports * max(nb_rxq, nb_txq) entries, after validating the queue
 * counts against every port's limits and refreshing each port's
 * socket id. A no-op when the stream count is unchanged.
 * Returns 0 on success, -1 on invalid queue configuration; exits the
 * application on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			fprintf(stderr,
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			fprintf(stderr,
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * device-reported socket.
			 */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* Streams are sized for the larger of the Rx/Tx queue counts. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		fprintf(stderr,
			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1921af75078fSIntel 
/*
 * Print the RX or TX burst-size distribution recorded in @pbs.
 *
 * Always reports the count of empty (0-packet) bursts, plus the two
 * non-zero burst sizes that occurred most often; everything else is
 * lumped into an "other" percentage.  Prints nothing if no burst was
 * recorded at all.
 *
 * @param rx_tx  label printed before the stats ("RX" or "TX")
 * @param pbs    per-stream burst spread histogram, indexed by burst size
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		/* Keep burst_stats[1] >= burst_stats[2] (top two counts). */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	/* Entries 0..2 are printed as percentages; slot 3 is the catch-all
	 * remainder computed as 100 minus what was already printed. */
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		/* Stop early once the printed entries cover every burst. */
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
1988af75078fSIntel 
/*
 * Print the software counters of one forwarding stream: packets
 * received/transmitted/dropped, checksum error counters when the
 * csum engine is active, and (optionally) burst-size statistics.
 * Streams that saw no traffic at all are silently skipped.
 *
 * @param stream_id  index into the global fwd_streams[] array
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing forwarded and nothing dropped: no report for this stream. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
			fs->rx_bad_outer_ip_csum);
	} else {
		printf("\n");
	}

	/* Burst spread is only recorded when --record-burst-stats is set. */
	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
2025af75078fSIntel 
/*
 * Display forwarding statistics for the current forwarding session.
 *
 * Aggregates the software counters of every forwarding stream into
 * per-port buckets, reads the hardware counters of every forwarding
 * port (subtracting the baseline snapshot taken by fwd_stats_reset()
 * so only traffic since the last reset is shown), then prints a
 * per-port section followed by accumulated totals.  When core-cycle
 * recording is enabled, a CPU cycles/packet summary is appended.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of per-stream software counters. */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int ret;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		/* With more streams than ports, print each stream on its
		 * own; otherwise remember one RX/TX stream per port so its
		 * burst stats can be shown in the per-port section below. */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
				fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		ret = rte_eth_stats_get(pt_id, &stats);
		if (ret != 0) {
			fprintf(stderr,
				"%s: Error: failed to get stats (port %u): %d",
				__func__, pt_id, ret);
			continue;
		}
		/* Subtract the snapshot taken at fwd_stats_reset() so only
		 * traffic of the current session is reported. */
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		/* Checksum error counters only make sense for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		}
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			/* TX-driven engines are measured against transmitted
			 * packets; all others against received packets. */
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
217753324971SDavid Marchand 
217853324971SDavid Marchand void
217953324971SDavid Marchand fwd_stats_reset(void)
218053324971SDavid Marchand {
218153324971SDavid Marchand 	streamid_t sm_id;
218253324971SDavid Marchand 	portid_t pt_id;
2183baef6bbfSMin Hu (Connor) 	int ret;
218453324971SDavid Marchand 	int i;
218553324971SDavid Marchand 
218653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
218753324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2188baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2189baef6bbfSMin Hu (Connor) 		if (ret != 0)
2190baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2191baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2192baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
219353324971SDavid Marchand 	}
219453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
219553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
219653324971SDavid Marchand 
219753324971SDavid Marchand 		fs->rx_packets = 0;
219853324971SDavid Marchand 		fs->tx_packets = 0;
219953324971SDavid Marchand 		fs->fwd_dropped = 0;
220053324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
220153324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
220253324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2203d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
220453324971SDavid Marchand 
220553324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
220653324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
220753324971SDavid Marchand 		fs->core_cycles = 0;
220853324971SDavid Marchand 	}
220953324971SDavid Marchand }
221053324971SDavid Marchand 
/*
 * Drain any packets pending in the RX queues of all forwarding ports
 * before a new forwarding session starts, freeing every received mbuf.
 * Two passes are made, with a 10 ms pause in between, to catch packets
 * that arrive while the first pass is running.  Skipped entirely in
 * multi-process mode and for queues left in the stopped state.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	if (num_procs > 1) {
		printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
		return;
	}

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];

				/* Polling stopped queues is prohibited. */
				if (ports[port_id].rxq[rxq].state ==
				    RTE_ETH_QUEUE_STATE_STOPPED)
					continue;

				/**
				* testpmd can get stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So a timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					/* Flushed packets are discarded. */
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
2266af75078fSIntel 
/*
 * Main forwarding loop executed on one logical core: repeatedly apply
 * the packet forwarding function @pkt_fwd to every enabled stream
 * assigned to this lcore, until fc->stopped is set by another thread.
 * When the corresponding libraries are compiled in, it also drives the
 * periodic bitrate calculation and the latency-stats update from the
 * designated lcores.
 *
 * @param fc       forwarding lcore context (stream range and stop flag)
 * @param pkt_fwd  per-stream forwarding function of the active engine
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore form a contiguous slice of
	 * fwd_streams[] starting at fc->stream_idx. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			if (!fsm[sm_id]->disabled)
				(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		/* Once per second, the bitrate lcore recomputes bitrates
		 * for all ports. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
2310af75078fSIntel 
2311af75078fSIntel static int
2312af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2313af75078fSIntel {
2314af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2315af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2316af75078fSIntel 	return 0;
2317af75078fSIntel }
2318af75078fSIntel 
2319af75078fSIntel /*
2320af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2321af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2322af75078fSIntel  */
2323af75078fSIntel static int
2324af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2325af75078fSIntel {
2326af75078fSIntel 	struct fwd_lcore *fwd_lc;
2327af75078fSIntel 	struct fwd_lcore tmp_lcore;
2328af75078fSIntel 
2329af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2330af75078fSIntel 	tmp_lcore = *fwd_lc;
2331af75078fSIntel 	tmp_lcore.stopped = 1;
2332af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2333af75078fSIntel 	return 0;
2334af75078fSIntel }
2335af75078fSIntel 
2336af75078fSIntel /*
2337af75078fSIntel  * Launch packet forwarding:
2338af75078fSIntel  *     - Setup per-port forwarding context.
2339af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2340af75078fSIntel  */
2341af75078fSIntel static void
2342af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2343af75078fSIntel {
2344af75078fSIntel 	unsigned int i;
2345af75078fSIntel 	unsigned int lc_id;
2346af75078fSIntel 	int diag;
2347af75078fSIntel 
2348af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2349af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2350af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2351af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2352af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2353af75078fSIntel 						     fwd_lcores[i], lc_id);
2354af75078fSIntel 			if (diag != 0)
235561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
235661a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2357af75078fSIntel 					lc_id, diag);
2358af75078fSIntel 		}
2359af75078fSIntel 	}
2360af75078fSIntel }
2361af75078fSIntel 
2362af75078fSIntel /*
2363af75078fSIntel  * Launch packet forwarding configuration.
2364af75078fSIntel  */
2365af75078fSIntel void
2366af75078fSIntel start_packet_forwarding(int with_tx_first)
2367af75078fSIntel {
2368af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2369af75078fSIntel 	port_fwd_end_t  port_fwd_end;
23703c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2371af75078fSIntel 	unsigned int i;
2372af75078fSIntel 
23735a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
23745a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
23755a8fb55cSReshma Pattan 
23765a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
23775a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
23785a8fb55cSReshma Pattan 
23795a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
23805a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
23815a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
23825a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
23835a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
23845a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
23855a8fb55cSReshma Pattan 
2386ce8d5614SIntel 	if (all_ports_started() == 0) {
238761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2388ce8d5614SIntel 		return;
2389ce8d5614SIntel 	}
2390af75078fSIntel 	if (test_done == 0) {
239161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2392af75078fSIntel 		return;
2393af75078fSIntel 	}
23947741e4cfSIntel 
239547a767b2SMatan Azrad 	fwd_config_setup();
239647a767b2SMatan Azrad 
239765744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
239865744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
239965744833SXueming Li 		return;
240065744833SXueming Li 
24013c4426dbSDmitry Kozlyuk 	if (stream_init != NULL)
24023c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
24033c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
24043c4426dbSDmitry Kozlyuk 
2405a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2406a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2407a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2408a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2409a78040c9SAlvin Zhang 				fprintf(stderr,
2410a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2411a78040c9SAlvin Zhang 				return;
2412a78040c9SAlvin Zhang 			}
2413a78040c9SAlvin Zhang 		}
2414a78040c9SAlvin Zhang 	}
2415a78040c9SAlvin Zhang 
2416a78040c9SAlvin Zhang 	if (with_tx_first) {
2417a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2418a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2419a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2420a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2421a78040c9SAlvin Zhang 					fprintf(stderr,
2422a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2423a78040c9SAlvin Zhang 					return;
2424a78040c9SAlvin Zhang 				}
2425a78040c9SAlvin Zhang 			}
2426a78040c9SAlvin Zhang 		}
2427a78040c9SAlvin Zhang 	}
2428a78040c9SAlvin Zhang 
2429a78040c9SAlvin Zhang 	test_done = 0;
2430a78040c9SAlvin Zhang 
24317741e4cfSIntel 	if(!no_flush_rx)
24327741e4cfSIntel 		flush_fwd_rx_queues();
24337741e4cfSIntel 
2434af75078fSIntel 	rxtx_config_display();
2435af75078fSIntel 
243653324971SDavid Marchand 	fwd_stats_reset();
2437af75078fSIntel 	if (with_tx_first) {
2438acbf77a6SZhihong Wang 		while (with_tx_first--) {
2439acbf77a6SZhihong Wang 			launch_packet_forwarding(
2440acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2441af75078fSIntel 			rte_eal_mp_wait_lcore();
2442acbf77a6SZhihong Wang 		}
2443af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2444af75078fSIntel 		if (port_fwd_end != NULL) {
2445af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2446af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2447af75078fSIntel 		}
2448af75078fSIntel 	}
2449af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2450af75078fSIntel }
2451af75078fSIntel 
2452af75078fSIntel void
2453af75078fSIntel stop_packet_forwarding(void)
2454af75078fSIntel {
2455af75078fSIntel 	port_fwd_end_t port_fwd_end;
2456af75078fSIntel 	lcoreid_t lc_id;
245753324971SDavid Marchand 	portid_t pt_id;
245853324971SDavid Marchand 	int i;
2459af75078fSIntel 
2460af75078fSIntel 	if (test_done) {
246161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2462af75078fSIntel 		return;
2463af75078fSIntel 	}
2464af75078fSIntel 	printf("Telling cores to stop...");
2465af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2466af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2467af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2468af75078fSIntel 	rte_eal_mp_wait_lcore();
2469af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2470af75078fSIntel 	if (port_fwd_end != NULL) {
2471af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2472af75078fSIntel 			pt_id = fwd_ports_ids[i];
2473af75078fSIntel 			(*port_fwd_end)(pt_id);
2474af75078fSIntel 		}
2475af75078fSIntel 	}
2476c185d42cSDavid Marchand 
247753324971SDavid Marchand 	fwd_stats_display();
247858d475b7SJerin Jacob 
2479af75078fSIntel 	printf("\nDone.\n");
2480af75078fSIntel 	test_done = 1;
2481af75078fSIntel }
2482af75078fSIntel 
2483cfae07fdSOuyang Changchun void
2484cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2485cfae07fdSOuyang Changchun {
2486492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
248761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up fail.\n");
2488cfae07fdSOuyang Changchun }
2489cfae07fdSOuyang Changchun 
2490cfae07fdSOuyang Changchun void
2491cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2492cfae07fdSOuyang Changchun {
2493492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
249461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down fail.\n");
2495cfae07fdSOuyang Changchun }
2496cfae07fdSOuyang Changchun 
2497ce8d5614SIntel static int
2498ce8d5614SIntel all_ports_started(void)
2499ce8d5614SIntel {
2500ce8d5614SIntel 	portid_t pi;
2501ce8d5614SIntel 	struct rte_port *port;
2502ce8d5614SIntel 
25037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2504ce8d5614SIntel 		port = &ports[pi];
2505ce8d5614SIntel 		/* Check if there is a port which is not started */
250641b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
250741b05095SBernard Iremonger 			(port->slave_flag == 0))
2508ce8d5614SIntel 			return 0;
2509ce8d5614SIntel 	}
2510ce8d5614SIntel 
2511ce8d5614SIntel 	/* No port is not started */
2512ce8d5614SIntel 	return 1;
2513ce8d5614SIntel }
2514ce8d5614SIntel 
2515148f963fSBruce Richardson int
25166018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
25176018eb8cSShahaf Shuler {
25186018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
25196018eb8cSShahaf Shuler 
25206018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
25216018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
25226018eb8cSShahaf Shuler 		return 0;
25236018eb8cSShahaf Shuler 	return 1;
25246018eb8cSShahaf Shuler }
25256018eb8cSShahaf Shuler 
25266018eb8cSShahaf Shuler int
2527edab33b1STetsuya Mukawa all_ports_stopped(void)
2528edab33b1STetsuya Mukawa {
2529edab33b1STetsuya Mukawa 	portid_t pi;
2530edab33b1STetsuya Mukawa 
25317d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
25326018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2533edab33b1STetsuya Mukawa 			return 0;
2534edab33b1STetsuya Mukawa 	}
2535edab33b1STetsuya Mukawa 
2536edab33b1STetsuya Mukawa 	return 1;
2537edab33b1STetsuya Mukawa }
2538edab33b1STetsuya Mukawa 
2539edab33b1STetsuya Mukawa int
2540edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2541edab33b1STetsuya Mukawa {
2542edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2543edab33b1STetsuya Mukawa 		return 0;
2544edab33b1STetsuya Mukawa 
2545edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2546edab33b1STetsuya Mukawa 		return 0;
2547edab33b1STetsuya Mukawa 
2548edab33b1STetsuya Mukawa 	return 1;
2549edab33b1STetsuya Mukawa }
2550edab33b1STetsuya Mukawa 
/*
 * Configure the Rx and Tx hairpin queues for the selected port.
 *
 * @pi      port to configure
 * @p_pi    previously configured port id, or RTE_MAX_ETHPORTS if none yet;
 *          used as the peer in the chained binding modes below
 * @cnt_pi  count of ports configured so far (parity selects pairing in
 *          "pair" mode)
 *
 * Peer selection is driven by the global hairpin_mode bit-mask:
 *   - low nibble == 0: loopback, port is its own peer, implicit bind
 *   - bit 0: chain ports (Tx peer is the next port, Rx peer is the
 *     previous one; the last port wraps to the first), manual bind
 *   - bit 1: pair ports two-by-two based on cnt_pi parity, manual bind
 *   - bit 4 (0x10): Tx flow rules are managed explicitly by the
 *     application (tx_explicit)
 *
 * Returns 0 on success, -1 on queue-setup failure (port state is reset
 * to stopped and need_reconfig_queues is set so setup is retried later).
 */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		/* Loopback mode: the port is its own hairpin peer. */
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		/* Chained mode: Tx peer is the next owned port (wrap to 0). */
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
						       RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		/* Paired mode: odd-numbered ports peer with the previous one. */
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	/*
	 * Hairpin Tx queues occupy ids [nb_txq, nb_txq + nb_hairpinq) and
	 * peer with the partner's hairpin Rx queues [nb_rxq, ...).
	 */
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up a Tx hairpin queue: roll back and return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	/* Hairpin Rx queues occupy ids [nb_rxq, nb_rxq + nb_hairpinq). */
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up an Rx hairpin queue: roll back and return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
26481c69df45SOri Kam 
/*
 * Configure an Rx queue, with optional buffer split across multiple
 * segments/mempools.
 *
 * If no Rx packet split is configured (rx_pkt_nb_segs <= 1) or the
 * BUFFER_SPLIT offload is not requested, this is a plain
 * rte_eth_rx_queue_setup() using @mp. Otherwise each segment descriptor
 * gets its length, offset and mempool from the global rx_pkt_seg_*
 * arrays, and the segment array is passed in via @rx_conf (with mp NULL).
 *
 * Note: @rx_conf->rx_seg points at a local array for the duration of the
 * call only; it is reset to NULL before returning. The queue state
 * (started/stopped) is recorded in the local ports[] bookkeeping based
 * on rx_deferred_start.
 *
 * Returns the rte_eth_rx_queue_setup() result (0 on success, negative
 * errno on failure).
 */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		/* Single-segment path: no split descriptors needed. */
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		goto exit;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use last valid pool for the segments with number
		 * exceeding the pool index.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				   rx_pkt_seg_lengths[i] :
				   mbuf_data_size[mp_n];
		/* Segments without a configured offset start at 0. */
		rx_seg->offset = i < rx_pkt_nb_offs ?
				   rx_pkt_seg_offsets[i] : 0;
		/* Fall back to the caller's pool if none found for mp_n. */
		rx_seg->mp = mpx ? mpx : mp;
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	/* mp must be NULL when per-segment pools are supplied. */
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				    socket_id, rx_conf, NULL);
	/* rx_useg is stack-allocated: detach it before returning. */
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
exit:
	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
						RTE_ETH_QUEUE_STATE_STOPPED :
						RTE_ETH_QUEUE_STATE_STARTED;
	return ret;
}
26972befc67fSViacheslav Ovsiienko 
269863b72657SIvan Ilchenko static int
269963b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
270063b72657SIvan Ilchenko {
270163b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
270263b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
270363b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
270463b72657SIvan Ilchenko 
270563b72657SIvan Ilchenko 	if (xstats_display_num == 0)
270663b72657SIvan Ilchenko 		return 0;
270763b72657SIvan Ilchenko 
270863b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
270963b72657SIvan Ilchenko 	if (*ids_supp == NULL)
271063b72657SIvan Ilchenko 		goto fail_ids_supp;
271163b72657SIvan Ilchenko 
271263b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
271363b72657SIvan Ilchenko 			      sizeof(**prev_values));
271463b72657SIvan Ilchenko 	if (*prev_values == NULL)
271563b72657SIvan Ilchenko 		goto fail_prev_values;
271663b72657SIvan Ilchenko 
271763b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
271863b72657SIvan Ilchenko 			      sizeof(**curr_values));
271963b72657SIvan Ilchenko 	if (*curr_values == NULL)
272063b72657SIvan Ilchenko 		goto fail_curr_values;
272163b72657SIvan Ilchenko 
272263b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
272363b72657SIvan Ilchenko 
272463b72657SIvan Ilchenko 	return 0;
272563b72657SIvan Ilchenko 
272663b72657SIvan Ilchenko fail_curr_values:
272763b72657SIvan Ilchenko 	free(*prev_values);
272863b72657SIvan Ilchenko fail_prev_values:
272963b72657SIvan Ilchenko 	free(*ids_supp);
273063b72657SIvan Ilchenko fail_ids_supp:
273163b72657SIvan Ilchenko 	return -ENOMEM;
273263b72657SIvan Ilchenko }
273363b72657SIvan Ilchenko 
273463b72657SIvan Ilchenko static void
273563b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
273663b72657SIvan Ilchenko {
273763b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
273863b72657SIvan Ilchenko 		return;
273963b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
274063b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
274163b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
274263b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
274363b72657SIvan Ilchenko }
274463b72657SIvan Ilchenko 
274563b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
274663b72657SIvan Ilchenko static void
274763b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
274863b72657SIvan Ilchenko {
274963b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
275063b72657SIvan Ilchenko 	const char *xstat_name;
275163b72657SIvan Ilchenko 	struct rte_port *port;
275263b72657SIvan Ilchenko 	uint64_t *ids_supp;
275363b72657SIvan Ilchenko 	int rc;
275463b72657SIvan Ilchenko 
275563b72657SIvan Ilchenko 	if (xstats_display_num == 0)
275663b72657SIvan Ilchenko 		return;
275763b72657SIvan Ilchenko 
275863b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
275963b72657SIvan Ilchenko 		fill_xstats_display_info();
276063b72657SIvan Ilchenko 		return;
276163b72657SIvan Ilchenko 	}
276263b72657SIvan Ilchenko 
276363b72657SIvan Ilchenko 	port = &ports[pi];
276463b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
276563b72657SIvan Ilchenko 		return;
276663b72657SIvan Ilchenko 
276763b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
276863b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
276963b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
277063b72657SIvan Ilchenko 
277163b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
277263b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
277363b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
277463b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
277563b72657SIvan Ilchenko 						   ids_supp + stat_supp);
277663b72657SIvan Ilchenko 		if (rc != 0) {
277763b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
277863b72657SIvan Ilchenko 				xstat_name, pi, stat);
277963b72657SIvan Ilchenko 			continue;
278063b72657SIvan Ilchenko 		}
278163b72657SIvan Ilchenko 		stat_supp++;
278263b72657SIvan Ilchenko 	}
278363b72657SIvan Ilchenko 
278463b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
278563b72657SIvan Ilchenko }
278663b72657SIvan Ilchenko 
278763b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
278863b72657SIvan Ilchenko static void
278963b72657SIvan Ilchenko fill_xstats_display_info(void)
279063b72657SIvan Ilchenko {
279163b72657SIvan Ilchenko 	portid_t pi;
279263b72657SIvan Ilchenko 
279363b72657SIvan Ilchenko 	if (xstats_display_num == 0)
279463b72657SIvan Ilchenko 		return;
279563b72657SIvan Ilchenko 
279663b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
279763b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
279863b72657SIvan Ilchenko }
279963b72657SIvan Ilchenko 
/*
 * Configure and start the given port, or all ports if pid is
 * RTE_PORT_ALL.
 *
 * For each selected port this (re)configures the device and its queues
 * when flagged (need_reconfig / need_reconfig_queues), sets up hairpin
 * queues when requested, starts the device, and finally performs manual
 * hairpin binding between started ports when hairpin_mode requires it.
 *
 * Bonding slaves are skipped (they are started via the bonded device).
 * Returns 0 on success; -1 on a configuration failure (the affected
 * port is flagged for reconfiguration on the next attempt).
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	/* Previously configured port; peer for chained hairpin modes. */
	portid_t p_pi = RTE_MAX_ETHPORTS;
	/* List of ports actually configured here, for hairpin binding. */
	portid_t pl[RTE_MAX_ETHPORTS];
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	uint16_t cnt_pi = 0;
	uint16_t cfg_pi = 0;
	int peer_pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port for this operation via the HANDLING state. */
		if (port->port_status == RTE_PORT_STOPPED)
			port->port_status = RTE_PORT_HANDLING;
		else {
			fprintf(stderr, "Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			struct rte_eth_conf dev_conf;
			int k;

			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					fprintf(stderr,
						"Failed to apply isolated mode on port %d\n",
						pi);
					return -1;
				}
			}
			/* Remove dump callbacks while reconfiguring. */
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				fprintf(stderr,
					"Port %d doesn't support hairpin queues\n",
					pi);
				return -1;
			}

			/* configure port */
			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr, "Fail to configure port %d\n",
					pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
			/* get device configuration */
			if (0 !=
				eth_dev_conf_get_print_err(pi, &dev_conf)) {
				fprintf(stderr,
					"port %d can not get device configuration\n",
					pi);
				return -1;
			}
			/*
			 * Mirror any offloads the driver enabled on its own
			 * into the local per-port and per-queue config so the
			 * bookkeeping matches the device state.
			 */
			/* Apply Rx offloads configuration */
			if (dev_conf.rxmode.offloads !=
			    port->dev_conf.rxmode.offloads) {
				port->dev_conf.rxmode.offloads |=
					dev_conf.rxmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_rx_queues;
				     k++)
					port->rxq[k].conf.offloads |=
						dev_conf.rxmode.offloads;
			}
			/* Apply Tx offloads configuration */
			if (dev_conf.txmode.offloads !=
			    port->dev_conf.txmode.offloads) {
				port->dev_conf.txmode.offloads |=
					dev_conf.txmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_tx_queues;
				     k++)
					port->txq[k].conf.offloads |=
						dev_conf.txmode.offloads;
			}
		}
		/* Queue setup is done by the primary process only. */
		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				struct rte_eth_txconf *conf =
							&port->txq[qi].conf;

				/* Prefer the ring's NUMA node when configured. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->txq[qi].conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->txq[qi].conf));

				if (diag == 0) {
					port->txq[qi].state =
						conf->tx_deferred_start ?
						RTE_ETH_QUEUE_STATE_STOPPED :
						RTE_ETH_QUEUE_STATE_STARTED;
					continue;
				}

				/* Fail to setup tx queue, return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Fail to configure port %d tx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find
							(rxring_numa[pi], 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rxq[qi].conf),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find
							(port->socket_id, 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rxq[qi].conf),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Fail to configure port %d rx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
				return -1;
		}
		/* Re-install dump callbacks according to verbosity. */
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				fprintf(stderr,
					"Port %d: Failed to disable Ptype parsing\n",
					pi);
		}

		/* Remember this port for the next iteration's hairpin setup. */
		p_pi = pi;
		cnt_pi++;

		/* start port */
		diag = eth_dev_start_mp(pi);
		if (diag < 0) {
			fprintf(stderr, "Fail to start port %d: %s\n",
				pi, rte_strerror(-diag));

			/* Failed to start the port: set it back to stopped */
			if (port->port_status == RTE_PORT_HANDLING)
				port->port_status = RTE_PORT_STOPPED;
			else
				fprintf(stderr,
					"Port %d can not be set back to stopped\n",
					pi);
			continue;
		}

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STARTED;
		else
			fprintf(stderr, "Port %d can not be set into started\n",
				pi);

		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
					RTE_ETHER_ADDR_BYTES(&port->eth_addr));

		/* at least one port started, need checking link status */
		need_check_link_status = 1;

		pl[cfg_pi++] = pi;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		fprintf(stderr, "Please stop the ports first\n");

	/* Manual hairpin modes require an explicit bind after start. */
	if (hairpin_mode & 0xf) {
		uint16_t i;
		int j;

		/* bind all started hairpin ports */
		for (i = 0; i < cfg_pi; i++) {
			pi = pl[i];
			/* bind current Tx to all peer Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 1);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						pi, peer_pl[j],
						rte_strerror(-diag));
					return -1;
				}
			}
			/* bind all peer Tx to current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						peer_pl[j], pi,
						rte_strerror(-diag));
					return -1;
				}
			}
		}
	}

	fill_xstats_display_info_for_port(pid);

	printf("Done\n");
	return 0;
}
3108ce8d5614SIntel 
/*
 * Stop the given port, or all ports if pid is RTE_PORT_ALL.
 *
 * Ports still in the forwarding configuration (while forwarding is
 * active) and bonding slaves are skipped with a message. For hairpin
 * modes, peer ports are unbound before the device is stopped; any flow
 * rules on the port are flushed as well.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		/* Claim the port via the HANDLING state; skip if not started. */
		if (port->port_status == RTE_PORT_STARTED)
			port->port_status = RTE_PORT_HANDLING;
		else
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			/* Unbind all hairpin Tx from this port first. */
			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		/* Flow rules do not survive a stop on all drivers: flush them. */
		if (port->flow_list)
			port_flow_flush(pi);

		if (eth_dev_stop_mp(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr, "Port %d can not be set into stopped\n",
				pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
3182ce8d5614SIntel 
3183ce6959bfSWisam Jaddo static void
31844f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3185ce6959bfSWisam Jaddo {
31864f1de450SThomas Monjalon 	portid_t i;
31874f1de450SThomas Monjalon 	portid_t new_total = 0;
3188ce6959bfSWisam Jaddo 
31894f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
31904f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
31914f1de450SThomas Monjalon 			array[new_total] = array[i];
31924f1de450SThomas Monjalon 			new_total++;
3193ce6959bfSWisam Jaddo 		}
31944f1de450SThomas Monjalon 	*total = new_total;
31954f1de450SThomas Monjalon }
31964f1de450SThomas Monjalon 
/*
 * Drop detached/invalid port ids from the global port lists and keep
 * the configured port count in sync with the forwarding port count.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	/* Forwarding config tracks whatever ports remain valid. */
	nb_cfg_ports = nb_fwd_ports;
}
3204ce6959bfSWisam Jaddo 
/*
 * Close the given port, or every port when @pid is RTE_PORT_ALL.
 *
 * Ports still in the forwarding configuration while a test runs,
 * bonding slaves, and already-closed ports are skipped. On the
 * primary process, port resources (multicast address pool, flows,
 * flex items, indirect actions) are released before the device is
 * closed. Closed ports are then purged from the global port lists.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		/* Only the requested port, or all of them for RTE_PORT_ALL. */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_CLOSED) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		if (is_proc_primary()) {
			/* Release per-port resources before closing the device. */
			mcast_addr_pool_destroy(pi);
			port_flow_flush(pi);
			port_flex_item_flush(pi);
			port_action_handle_flush(pi);
			rte_eth_dev_close(pi);
		}

		free_xstats_display_info(pi);
	}

	/* Closed ports are invalid now: drop them from the port lists. */
	remove_invalid_ports();
	printf("Done\n");
}
3254ce8d5614SIntel 
3255edab33b1STetsuya Mukawa void
325697f1e196SWei Dai reset_port(portid_t pid)
325797f1e196SWei Dai {
325897f1e196SWei Dai 	int diag;
325997f1e196SWei Dai 	portid_t pi;
326097f1e196SWei Dai 	struct rte_port *port;
326197f1e196SWei Dai 
326297f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
326397f1e196SWei Dai 		return;
326497f1e196SWei Dai 
32651cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
32661cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
326761a3b0e5SAndrew Rybchenko 		fprintf(stderr,
326861a3b0e5SAndrew Rybchenko 			"Can not reset port(s), please stop port(s) first.\n");
32691cde1b9aSShougang Wang 		return;
32701cde1b9aSShougang Wang 	}
32711cde1b9aSShougang Wang 
327297f1e196SWei Dai 	printf("Resetting ports...\n");
327397f1e196SWei Dai 
327497f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
327597f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
327697f1e196SWei Dai 			continue;
327797f1e196SWei Dai 
327897f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
327961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
328061a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
328161a3b0e5SAndrew Rybchenko 				pi);
328297f1e196SWei Dai 			continue;
328397f1e196SWei Dai 		}
328497f1e196SWei Dai 
328597f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
328661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
328761a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
328897f1e196SWei Dai 				pi);
328997f1e196SWei Dai 			continue;
329097f1e196SWei Dai 		}
329197f1e196SWei Dai 
329297f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
329397f1e196SWei Dai 		if (diag == 0) {
329497f1e196SWei Dai 			port = &ports[pi];
329597f1e196SWei Dai 			port->need_reconfig = 1;
329697f1e196SWei Dai 			port->need_reconfig_queues = 1;
329797f1e196SWei Dai 		} else {
329861a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
329961a3b0e5SAndrew Rybchenko 				pi, diag);
330097f1e196SWei Dai 		}
330197f1e196SWei Dai 	}
330297f1e196SWei Dai 
330397f1e196SWei Dai 	printf("Done\n");
330497f1e196SWei Dai }
330597f1e196SWei Dai 
330697f1e196SWei Dai void
3307edab33b1STetsuya Mukawa attach_port(char *identifier)
3308ce8d5614SIntel {
33094f1ed78eSThomas Monjalon 	portid_t pi;
3310c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3311ce8d5614SIntel 
3312edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3313edab33b1STetsuya Mukawa 
3314edab33b1STetsuya Mukawa 	if (identifier == NULL) {
331561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameters are specified\n");
3316edab33b1STetsuya Mukawa 		return;
3317ce8d5614SIntel 	}
3318ce8d5614SIntel 
331975b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3320c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3321edab33b1STetsuya Mukawa 		return;
3322c9cce428SThomas Monjalon 	}
3323c9cce428SThomas Monjalon 
33244f1ed78eSThomas Monjalon 	/* first attach mode: event */
33254f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
33264f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
33274f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
33284f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
33294f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
33304f1ed78eSThomas Monjalon 				setup_attached_port(pi);
33314f1ed78eSThomas Monjalon 		return;
33324f1ed78eSThomas Monjalon 	}
33334f1ed78eSThomas Monjalon 
33344f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
333586fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
33364f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
333786fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
333886fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3339c9cce428SThomas Monjalon 		setup_attached_port(pi);
3340c9cce428SThomas Monjalon 	}
334186fa5de1SThomas Monjalon }
3342c9cce428SThomas Monjalon 
3343c9cce428SThomas Monjalon static void
3344c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3345c9cce428SThomas Monjalon {
3346c9cce428SThomas Monjalon 	unsigned int socket_id;
334734fc1051SIvan Ilchenko 	int ret;
3348edab33b1STetsuya Mukawa 
3349931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
335029841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3351931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
335229841336SPhil Yang 		socket_id = socket_ids[0];
3353931126baSBernard Iremonger 	reconfig(pi, socket_id);
335434fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
335534fc1051SIvan Ilchenko 	if (ret != 0)
335661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
335761a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
335834fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3359edab33b1STetsuya Mukawa 
33604f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
33614f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
33624f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
33634f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3364edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3365edab33b1STetsuya Mukawa 
3366edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3367edab33b1STetsuya Mukawa 	printf("Done\n");
3368edab33b1STetsuya Mukawa }
3369edab33b1STetsuya Mukawa 
33700654d4a8SThomas Monjalon static void
33710654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
33725f4ec54fSChen Jing D(Mark) {
3373f8e5baa2SThomas Monjalon 	portid_t sibling;
3374f8e5baa2SThomas Monjalon 
3375f8e5baa2SThomas Monjalon 	if (dev == NULL) {
337661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3377f8e5baa2SThomas Monjalon 		return;
3378f8e5baa2SThomas Monjalon 	}
3379f8e5baa2SThomas Monjalon 
33800654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3381938a184aSAdrien Mazarguil 
33822a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
33832a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
33842a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
338561a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
338661a3b0e5SAndrew Rybchenko 					sibling);
33872a449871SThomas Monjalon 				return;
33882a449871SThomas Monjalon 			}
33892a449871SThomas Monjalon 			port_flow_flush(sibling);
33902a449871SThomas Monjalon 		}
33912a449871SThomas Monjalon 	}
33922a449871SThomas Monjalon 
339375b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3394f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3395edab33b1STetsuya Mukawa 		return;
33963070419eSGaetan Rivet 	}
33974f1de450SThomas Monjalon 	remove_invalid_ports();
339803ce2c53SMatan Azrad 
33990654d4a8SThomas Monjalon 	printf("Device is detached\n");
3400f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3401edab33b1STetsuya Mukawa 	printf("Done\n");
3402edab33b1STetsuya Mukawa 	return;
34035f4ec54fSChen Jing D(Mark) }
34045f4ec54fSChen Jing D(Mark) 
3405af75078fSIntel void
34060654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
34070654d4a8SThomas Monjalon {
34080a0821bcSPaulis Gributs 	int ret;
34090a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
34100a0821bcSPaulis Gributs 
34110654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
34120654d4a8SThomas Monjalon 		return;
34130654d4a8SThomas Monjalon 
34140654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
34150654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
341661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
34170654d4a8SThomas Monjalon 			return;
34180654d4a8SThomas Monjalon 		}
341961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
34200654d4a8SThomas Monjalon 	}
34210654d4a8SThomas Monjalon 
34220a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
34230a0821bcSPaulis Gributs 	if (ret != 0) {
34240a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
34250a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
34260a0821bcSPaulis Gributs 			port_id);
34270a0821bcSPaulis Gributs 		return;
34280a0821bcSPaulis Gributs 	}
34290a0821bcSPaulis Gributs 	detach_device(dev_info.device);
34300654d4a8SThomas Monjalon }
34310654d4a8SThomas Monjalon 
/*
 * Detach the device identified by a devargs string.
 *
 * All ports matching the identifier must be stopped or closed; flows
 * on still-open matching ports are flushed before removal. The parsed
 * devargs are released with rte_devargs_reset() on every exit path.
 */
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				/* Abort: release iterator state and devargs. */
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	/* Detached ports are invalid now: purge them from the lists. */
	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
347455e51c96SNithin Dabilpuram 
/*
 * Tear everything down on testpmd exit: stop forwarding if it is
 * still running, stop and close all ports, shut down hot-plug
 * monitoring when it was enabled, then free the mbuf mempools and
 * the xstats display state.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i]) {
			/* Anonymous memory was DMA-mapped: unmap it first. */
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
#endif
	if (ports != NULL) {
		/* Skip link status checks while shutting down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		/* All ports are stopped; now close them in a second pass. */
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i])
			mempool_free_mp(mempools[i]);
	}
	free(xstats_display);

	printf("\nBye...\n");
}
3539af75078fSIntel 
/* Handler invoked when its associated test command is run. */
typedef void (*cmd_func_t)(void);
/* Association of a command name with the function executing it. */
struct pmd_test_command {
	const char *cmd_name;	/* command name to match */
	cmd_func_t cmd_func;	/* handler for the command */
};
3545af75078fSIntel 
/*
 * Check the link status of all ports in up to 9s, and print them finally.
 *
 * Polls every port selected by @port_mask each CHECK_INTERVAL ms.
 * Once all links are up, or MAX_CHECK_TIME intervals have elapsed, a
 * final pass prints each port's status. When link-state-change
 * interrupts are enabled the loop exits after the first pass, since
 * changes are then reported through the LSC event instead.
 */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		if (lsc_interrupt)
			break;
	}
}
3606af75078fSIntel 
/*
 * Deferred handler for a device removal event, scheduled with
 * rte_eal_alarm_set() from the event callbacks: stops packet
 * forwarding if the port takes part in it, stops and closes the port,
 * detaches its underlying device, then restarts forwarding if it had
 * been interrupted. @arg carries the port id cast to a pointer.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* The device is going away: skip link checks while stopping. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	/* Grab the device handle before closing invalidates the port. */
	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;
		close_port(port_id);
		detach_device(device); /* might be already removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}
3639284c908cSGaetan Rivet 
364076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3641d6af1a13SBernard Iremonger static int
3642f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3643d6af1a13SBernard Iremonger 		  void *ret_param)
364476ad4a2dSGaetan Rivet {
364576ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3646d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
364776ad4a2dSGaetan Rivet 
364876ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
364961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
365061a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
365176ad4a2dSGaetan Rivet 			port_id, __func__, type);
365276ad4a2dSGaetan Rivet 		fflush(stderr);
36533af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3654f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
365597b5d8b5SThomas Monjalon 			eth_event_desc[type]);
365676ad4a2dSGaetan Rivet 		fflush(stdout);
365776ad4a2dSGaetan Rivet 	}
3658284c908cSGaetan Rivet 
3659284c908cSGaetan Rivet 	switch (type) {
36604f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
36614f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
36624f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
36634f1ed78eSThomas Monjalon 		break;
3664284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
36654f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
36664f1ed78eSThomas Monjalon 			break;
3667284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3668cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
366961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
367061a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3671284c908cSGaetan Rivet 		break;
367285c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
367385c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
367485c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
367585c6571cSThomas Monjalon 		break;
3676*bc70e559SSpike Du 	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3677*bc70e559SSpike Du 		uint16_t rxq_id;
3678*bc70e559SSpike Du 		int ret;
3679*bc70e559SSpike Du 
3680*bc70e559SSpike Du 		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3681*bc70e559SSpike Du 		for (rxq_id = 0; ; rxq_id++) {
3682*bc70e559SSpike Du 			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3683*bc70e559SSpike Du 							    NULL);
3684*bc70e559SSpike Du 			if (ret <= 0)
3685*bc70e559SSpike Du 				break;
3686*bc70e559SSpike Du 			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3687*bc70e559SSpike Du 			       port_id, rxq_id);
3688*bc70e559SSpike Du 		}
3689*bc70e559SSpike Du 		break;
3690*bc70e559SSpike Du 	}
3691284c908cSGaetan Rivet 	default:
3692284c908cSGaetan Rivet 		break;
3693284c908cSGaetan Rivet 	}
3694d6af1a13SBernard Iremonger 	return 0;
369576ad4a2dSGaetan Rivet }
369676ad4a2dSGaetan Rivet 
369797b5d8b5SThomas Monjalon static int
369897b5d8b5SThomas Monjalon register_eth_event_callback(void)
369997b5d8b5SThomas Monjalon {
370097b5d8b5SThomas Monjalon 	int ret;
370197b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
370297b5d8b5SThomas Monjalon 
370397b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
370497b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
370597b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
370697b5d8b5SThomas Monjalon 				event,
370797b5d8b5SThomas Monjalon 				eth_event_callback,
370897b5d8b5SThomas Monjalon 				NULL);
370997b5d8b5SThomas Monjalon 		if (ret != 0) {
371097b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
371197b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
371297b5d8b5SThomas Monjalon 			return -1;
371397b5d8b5SThomas Monjalon 		}
371497b5d8b5SThomas Monjalon 	}
371597b5d8b5SThomas Monjalon 
371697b5d8b5SThomas Monjalon 	return 0;
371797b5d8b5SThomas Monjalon }
371897b5d8b5SThomas Monjalon 
/*
 * Bus-level device hot-plug event callback.
 * This function is used by the interrupt thread. REMOVE events map the
 * device name to its port and schedule a deferred detach; ADD events
 * are only logged for now.
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
3768fb73e096SJeff Guo 
/*
 * Build the Rx/Tx queue configuration for every queue of port @pid:
 * start from the driver's default per-queue config, then override with
 * any threshold/descriptor parameters given on the command line.
 * Per-queue offloads already configured on the port are preserved.
 */
static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		/* Keep offloads across the reset to driver defaults. */
		offloads = port->rxq[qid].conf.offloads;
		port->rxq[qid].conf = port->dev_info.default_rxconf;

		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rxq[qid].conf.offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rxq[qid].conf.rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		/* Same preserve-offloads dance for the Tx queues. */
		offloads = port->txq[qid].conf.offloads;
		port->txq[qid].conf = port->dev_info.default_txconf;
		if (offloads != 0)
			port->txq[qid].conf.offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
3834f2c5125aSPablo de Lara 
38350c4abd36SSteve Yang /*
3836b563c142SFerruh Yigit  * Helper function to set MTU from frame size
38370c4abd36SSteve Yang  *
38380c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
38390c4abd36SSteve Yang  *
38400c4abd36SSteve Yang  * return 0 on success, negative on error
38410c4abd36SSteve Yang  */
38420c4abd36SSteve Yang int
3843b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
38440c4abd36SSteve Yang {
38450c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
38460c4abd36SSteve Yang 	uint32_t eth_overhead;
38471bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
38480c4abd36SSteve Yang 
38491bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
38501bb4a528SFerruh Yigit 
38511bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
38521bb4a528SFerruh Yigit 		printf("Failed to get MTU for port %u\n", portid);
38531bb4a528SFerruh Yigit 		return -1;
38541bb4a528SFerruh Yigit 	}
38551bb4a528SFerruh Yigit 
38561bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
38570c4abd36SSteve Yang 
38581bb4a528SFerruh Yigit 	if (mtu == new_mtu)
38591bb4a528SFerruh Yigit 		return 0;
38601bb4a528SFerruh Yigit 
38611bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
386261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
386361a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
38641bb4a528SFerruh Yigit 			new_mtu, portid);
38651bb4a528SFerruh Yigit 		return -1;
38660c4abd36SSteve Yang 	}
38670c4abd36SSteve Yang 
38681bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
38691bb4a528SFerruh Yigit 
38700c4abd36SSteve Yang 	return 0;
38710c4abd36SSteve Yang }
38720c4abd36SSteve Yang 
/*
 * Apply the default configuration to every probed Ethernet port:
 * flow-director settings, RSS (only when several Rx queues are in use),
 * per-queue Rx/Tx parameters, the cached MAC address and, when the
 * device advertises support, LSC/RMV interrupts.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		/* dev_info is needed below; bail out of the whole pass on failure. */
		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			/* Request only the hash types the device supports. */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			/* Single Rx queue: RSS is pointless, disable it. */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Do not override the mq_mode chosen by DCB setup. */
		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
			} else {
				/* No RSS: drop the RSS-hash offload on the port
				 * and on every configured Rx queue.
				 */
				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rxq[i].conf.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable optional interrupts only when the device flags say
		 * they are supported.
		 */
		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
3931013af9b6SIntel 
393241b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
393341b05095SBernard Iremonger {
393441b05095SBernard Iremonger 	struct rte_port *port;
393541b05095SBernard Iremonger 
393641b05095SBernard Iremonger 	port = &ports[slave_pid];
393741b05095SBernard Iremonger 	port->slave_flag = 1;
393841b05095SBernard Iremonger }
393941b05095SBernard Iremonger 
394041b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
394141b05095SBernard Iremonger {
394241b05095SBernard Iremonger 	struct rte_port *port;
394341b05095SBernard Iremonger 
394441b05095SBernard Iremonger 	port = &ports[slave_pid];
394541b05095SBernard Iremonger 	port->slave_flag = 0;
394641b05095SBernard Iremonger }
394741b05095SBernard Iremonger 
39480e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
39490e545d30SBernard Iremonger {
39500e545d30SBernard Iremonger 	struct rte_port *port;
39510a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
39520a0821bcSPaulis Gributs 	int ret;
39530e545d30SBernard Iremonger 
39540e545d30SBernard Iremonger 	port = &ports[slave_pid];
39550a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
39560a0821bcSPaulis Gributs 	if (ret != 0) {
39570a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
39580a0821bcSPaulis Gributs 			"Failed to get device info for port id %d,"
39590a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave",
39600a0821bcSPaulis Gributs 			slave_pid);
39610a0821bcSPaulis Gributs 		return 0;
39620a0821bcSPaulis Gributs 	}
39630a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3964b8b8b344SMatan Azrad 		return 1;
3965b8b8b344SMatan Azrad 	return 0;
39660e545d30SBernard Iremonger }
39670e545d30SBernard Iremonger 
/* VLAN IDs used to populate the VMDQ+DCB pool map in get_eth_dcb_conf()
 * and installed as Rx VLAN filters by init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
3974013af9b6SIntel 
/*
 * Build the Ethernet configuration for DCB testing.
 *
 * In DCB_VT_ENABLED mode a VMDQ+DCB layout is produced: Rx pools are
 * mapped from the vlan_tags[] table and user priorities are assigned
 * to traffic classes round-robin.  Otherwise plain DCB is configured
 * (with RSS on Rx), preserving the device's current RSS hash settings.
 *
 * Returns 0 on success, or the negative value returned by
 * rte_eth_dev_rss_hash_conf_get() on failure.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs for only 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);

		/* One VLAN tag per pool; each tag steers to exactly one pool. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread user priorities round-robin over the traffic classes. */
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		/* Preserve the device's current RSS hash configuration. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Round-robin mapping of user priorities to traffic classes. */
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	}

	/* Priority flow control additionally requires the PFC capability bit. */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;

	return 0;
}
4054013af9b6SIntel 
/*
 * Reconfigure a port for DCB testing: build the DCB configuration,
 * re-run rte_eth_dev_configure(), derive the global Rx/Tx queue counts
 * from the DCB layout and install Rx VLAN filters for all test tags.
 *
 * Returns 0 on success, negative on error.  Not supported with
 * multiple processes.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support dcb.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	/* remove RSS HASH offload for DCB in vt mode */
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
		for (i = 0; i < nb_rxq; i++)
			rte_port->rxq[i].conf.offloads &=
				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
	}

	/* re-configure the device . */
	/* NOTE(review): nb_rxq is passed for both the Rx and Tx queue
	 * counts; the DCB setup below keeps nb_rxq == nb_txq, but confirm
	 * this is intentional rather than a typo for nb_txq.
	 */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	/* Refresh dev_info after the re-configuration. */
	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	/* Commit the DCB configuration to the port. */
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
4151af75078fSIntel 
4152ffc468ffSTetsuya Mukawa static void
4153ffc468ffSTetsuya Mukawa init_port(void)
4154ffc468ffSTetsuya Mukawa {
41551b9f2746SGregory Etelson 	int i;
41561b9f2746SGregory Etelson 
4157ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4158ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4159ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4160ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4161ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4162ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4163ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4164ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4165ffc468ffSTetsuya Mukawa 	}
41661b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
416763b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
416863b72657SIvan Ilchenko 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
41691b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
417029841336SPhil Yang 	/* Initialize ports NUMA structures */
417129841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
417229841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
417329841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4174ffc468ffSTetsuya Mukawa }
4175ffc468ffSTetsuya Mukawa 
/* Tear down the ports and leave the interactive prompt before exit;
 * called from the signal handler.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
4182d3a274ceSZhihong Wang 
4183d3a274ceSZhihong Wang static void
4184cfea1f30SPablo de Lara print_stats(void)
4185cfea1f30SPablo de Lara {
4186cfea1f30SPablo de Lara 	uint8_t i;
4187cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4188cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4189cfea1f30SPablo de Lara 
4190cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4191cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4192cfea1f30SPablo de Lara 
4193cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4194cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4195cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4196683d1e82SIgor Romanov 
4197683d1e82SIgor Romanov 	fflush(stdout);
4198cfea1f30SPablo de Lara }
4199cfea1f30SPablo de Lara 
/*
 * SIGINT/SIGTERM handler: shut down optional capture/latency
 * facilities, stop the ports, then re-raise the signal with the
 * default disposition so the process exits with the expected status.
 *
 * NOTE(review): this runs non-async-signal-safe teardown (fprintf,
 * driver shutdown) directly in signal context — presumably acceptable
 * since the process is exiting; confirm if hardening is needed.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}
4224d3a274ceSZhihong Wang 
/*
 * testpmd entry point: initialize EAL, logging and optional subsystems
 * (pdump, metrics, latency/bitrate stats), parse arguments, configure
 * and start the ports, then either run the interactive command line or
 * forward packets until interrupted.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Install the handlers before EAL init so early signals are caught. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	/* Collect the ids of all ports probed by the EAL. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the EAL arguments already consumed by rte_eal_init(). */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

#ifndef RTE_EXEC_ENV_WINDOWS
	/* Pin pages in RAM to avoid page faults in the fast path. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
			nb_rxq, nb_txq);

	init_config();

	/* Optional hotplug support: monitor and react to device events. */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

#ifdef RTE_LIB_METRICS
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		/* NOTE(review): this `ret` shadows the outer one. */
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIB_CMDLINE
	if (init_cmdline() != 0)
		rte_exit(EXIT_FAILURE,
			"Could not initialise cmdline context.\n");

	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	/* Interactive mode: run the prompt until the user quits. */
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		/* Non-interactive mode: forward until signal or Enter. */
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Periodically print stats until the handler sets f_quit. */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}
4444